diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 3780e6a7ddf..d43a7b0c072 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -77,6 +77,7 @@
/appengine/standard_python3/spanner/* @GoogleCloudPlatform/api-spanner-python @GoogleCloudPlatform/python-samples-reviewers @GoogleCloudPlatform/cloud-samples-reviewers
/asset/**/* @GoogleCloudPlatform/cloud-asset-analysis-team @GoogleCloudPlatform/python-samples-reviewers @GoogleCloudPlatform/cloud-samples-reviewers
/bigquery/**/* @chalmerlowe @GoogleCloudPlatform/python-samples-reviewers @GoogleCloudPlatform/cloud-samples-reviewers
+/bigquery/bigframes/**/* @tswast @GoogleCloudPlatform/api-bigquery @GoogleCloudPlatform/cloud-samples-reviewers
/bigquery/remote_function/**/* @autoerr @GoogleCloudPlatform/python-samples-reviewers @GoogleCloudPlatform/cloud-samples-reviewers
/cloud-media-livestream/**/* @GoogleCloudPlatform/cloud-media-team @GoogleCloudPlatform/python-samples-reviewers @GoogleCloudPlatform/cloud-samples-reviewers
/bigquery-connection/**/* @GoogleCloudPlatform/api-bigquery @GoogleCloudPlatform/python-samples-reviewers @GoogleCloudPlatform/cloud-samples-reviewers
diff --git a/bigquery/bigframes/.gitignore b/bigquery/bigframes/.gitignore
new file mode 100644
index 00000000000..6e1f113ef05
--- /dev/null
+++ b/bigquery/bigframes/.gitignore
@@ -0,0 +1 @@
+noxfile.py
diff --git a/bigquery/bigframes/README.md b/bigquery/bigframes/README.md
new file mode 100644
index 00000000000..a243089ef7f
--- /dev/null
+++ b/bigquery/bigframes/README.md
@@ -0,0 +1,10 @@
+# BigQuery DataFrames code samples
+
+This directory contains code samples for [BigQuery DataFrames (aka
+BigFrames)](https://dataframes.bigquery.dev/).
+
+To install BigQuery DataFrames, run:
+
+```
+pip install --upgrade bigframes
+```
diff --git a/bigquery/bigframes/call_python_udf.py b/bigquery/bigframes/call_python_udf.py
new file mode 100644
index 00000000000..200b1c1fb54
--- /dev/null
+++ b/bigquery/bigframes/call_python_udf.py
@@ -0,0 +1,105 @@
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# [START bigquery_dataframes_call_python_udf]
+import textwrap
+from typing import Tuple
+
+import bigframes.pandas as bpd
+import pandas as pd
+import pyarrow as pa
+
+
+# Using partial ordering mode enables more efficient query optimizations.
+bpd.options.bigquery.ordering_mode = "partial"
+
+
+def call_python_udf(
+ project_id: str, location: str,
+) -> Tuple[pd.Series, bpd.Series]:
+ # Set the billing project to use for queries. This step is optional, as the
+ # project can be inferred from your environment in many cases.
+ bpd.options.bigquery.project = project_id # "your-project-id"
+
+ # Since this example works with local data, set a processing location.
+ bpd.options.bigquery.location = location # "US"
+
+ # Create a sample series.
+ xml_series = pd.Series(
+ [
+ textwrap.dedent(
+ """
+                <book>
+                  <title>The Great Gatsby</title>
+                  <author>F. Scott Fitzgerald</author>
+                </book>
+ """
+ ),
+ textwrap.dedent(
+ """
+                <book>
+                  <title>1984</title>
+                  <author>George Orwell</author>
+                </book>
+ """
+ ),
+ textwrap.dedent(
+ """
+                <book>
+                  <title>Brave New World</title>
+                  <author>Aldous Huxley</author>
+                </book>
+ """
+ ),
+ ],
+ dtype=pd.ArrowDtype(pa.string()),
+ )
+ df = pd.DataFrame({"xml": xml_series})
+
+ # Use the BigQuery Accessor, which is automatically registered on pandas
+ # DataFrames when you import bigframes. This example uses a function that
+ # has been deployed to bigquery-utils for demonstration purposes. To use in
+ # production, deploy the function at
+ # https://github.com/GoogleCloudPlatform/bigquery-utils/blob/master/udfs/community/cw_xml_extract.sqlx
+ # to your own project.
+ titles_pandas = df.bigquery.sql_scalar(
+ "`bqutil`.`fn`.cw_xml_extract({xml}, '//title/text()')",
+ )
+
+ # Alternatively, call read_gbq_function to get a pointer to the function
+ # that can be applied on BigQuery DataFrames objects.
+ cw_xml_extract = bpd.read_gbq_function("bqutil.fn.cw_xml_extract")
+ xml_bigframes = bpd.read_pandas(xml_series)
+
+ xpath_query = "//title/text()"
+ titles_bigframes = xml_bigframes.apply(cw_xml_extract, args=(xpath_query,))
+ return titles_pandas, titles_bigframes
+ # [END bigquery_dataframes_call_python_udf]
+
+
+if __name__ == "__main__":
+ import argparse
+
+ parser = argparse.ArgumentParser()
+
+ # Note: GCP project ID can be inferred from the environment if Application
+ # Default Credentials are set, so None is perfectly valid for --project_id.
+ parser.add_argument("--project_id", type=str)
+ parser.add_argument("--location", default="US", type=str)
+ args = parser.parse_args()
+
+ pddf, bfdf = call_python_udf(project_id=args.project_id, location=args.location)
+ print(pddf)
+ print(bfdf.to_pandas())
diff --git a/bigquery/bigframes/call_python_udf_test.py b/bigquery/bigframes/call_python_udf_test.py
new file mode 100644
index 00000000000..67475d94718
--- /dev/null
+++ b/bigquery/bigframes/call_python_udf_test.py
@@ -0,0 +1,24 @@
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import bigframes.pandas as bpd
+
+import call_python_udf
+
+
+def test_call_python_udf(project_id: str, location: str) -> None:
+ bpd.close_session()
+ pd_result, bf_result = call_python_udf.call_python_udf(project_id=project_id, location=location)
+ assert len(pd_result.index) == 3
+ assert len(bf_result.index) == 3
diff --git a/bigquery/bigframes/conftest.py b/bigquery/bigframes/conftest.py
new file mode 100644
index 00000000000..a3274f08ca3
--- /dev/null
+++ b/bigquery/bigframes/conftest.py
@@ -0,0 +1,27 @@
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import pytest
+
+
+@pytest.fixture(scope="session")
+def project_id() -> str:
+ return os.environ["GOOGLE_CLOUD_PROJECT"]
+
+
+@pytest.fixture(scope="session")
+def location() -> str:
+ return "US"
diff --git a/bigquery/bigframes/noxfile_config.py b/bigquery/bigframes/noxfile_config.py
new file mode 100644
index 00000000000..f19dde20378
--- /dev/null
+++ b/bigquery/bigframes/noxfile_config.py
@@ -0,0 +1,38 @@
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Default TEST_CONFIG_OVERRIDE for python repos.
+
+# You can copy this file into your directory, then it will be imported from
+# the noxfile.py.
+
+# The source of truth:
+# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py
+
+TEST_CONFIG_OVERRIDE = {
+ # You can opt out from the test for specific Python versions.
+ "ignored_versions": ["2.7", "3.6", "3.8", "3.9", "3.11"],
+ # Old samples are opted out of enforcing Python type hints
+ # All new samples should feature them
+ "enforce_type_hints": True,
+ # An envvar key for determining the project id to use. Change it
+ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
+ # build specific Cloud project. You can also use your own string
+ # to use your own Cloud project.
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
+ # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
+ # A dictionary you want to inject into your test. Don't put any
+ # secrets here. These values will override predefined values.
+ "envs": {},
+}
diff --git a/bigquery/bigframes/requirements-test.txt b/bigquery/bigframes/requirements-test.txt
new file mode 100644
index 00000000000..f1684cd8061
--- /dev/null
+++ b/bigquery/bigframes/requirements-test.txt
@@ -0,0 +1,2 @@
+flaky==3.8.1
+pytest==8.2.0
diff --git a/bigquery/bigframes/requirements.txt b/bigquery/bigframes/requirements.txt
new file mode 100644
index 00000000000..a14856a58d8
--- /dev/null
+++ b/bigquery/bigframes/requirements.txt
@@ -0,0 +1 @@
+bigframes==2.38.0
diff --git a/cloud-sql/mysql/sqlalchemy/requirements.txt b/cloud-sql/mysql/sqlalchemy/requirements.txt
index b7e2ba6e10a..bcb4ea5c4bb 100644
--- a/cloud-sql/mysql/sqlalchemy/requirements.txt
+++ b/cloud-sql/mysql/sqlalchemy/requirements.txt
@@ -2,6 +2,6 @@ Flask==2.2.2
SQLAlchemy==2.0.40
PyMySQL==1.1.2
gunicorn==23.0.0
-cloud-sql-python-connector==1.20.0
+cloud-sql-python-connector==1.20.1
functions-framework==3.9.2
Werkzeug==2.3.8
diff --git a/cloud-sql/postgres/sqlalchemy/requirements.txt b/cloud-sql/postgres/sqlalchemy/requirements.txt
index ba738cc1669..e44a280e6bf 100644
--- a/cloud-sql/postgres/sqlalchemy/requirements.txt
+++ b/cloud-sql/postgres/sqlalchemy/requirements.txt
@@ -1,7 +1,7 @@
Flask==2.2.2
pg8000==1.31.5
SQLAlchemy==2.0.40
-cloud-sql-python-connector==1.20.0
+cloud-sql-python-connector==1.20.1
gunicorn==23.0.0
functions-framework==3.9.2
Werkzeug==2.3.8
diff --git a/cloud-sql/sql-server/sqlalchemy/requirements.txt b/cloud-sql/sql-server/sqlalchemy/requirements.txt
index a5122909569..32660dce882 100644
--- a/cloud-sql/sql-server/sqlalchemy/requirements.txt
+++ b/cloud-sql/sql-server/sqlalchemy/requirements.txt
@@ -3,7 +3,7 @@ gunicorn==23.0.0
python-tds==1.16.0
pyopenssl==26.0.0
SQLAlchemy==2.0.40
-cloud-sql-python-connector==1.20.0
+cloud-sql-python-connector==1.20.1
sqlalchemy-pytds==1.0.2
functions-framework==3.9.2
Werkzeug==2.3.8
diff --git a/composer/airflow_1_samples/gke_operator.py b/composer/airflow_1_samples/gke_operator.py
index 082d3333f9a..410a5a2aa7d 100644
--- a/composer/airflow_1_samples/gke_operator.py
+++ b/composer/airflow_1_samples/gke_operator.py
@@ -97,7 +97,7 @@
# [END composer_gkeoperator_minconfig_airflow_1]
# [START composer_gkeoperator_templateconfig_airflow_1]
- kubenetes_template_ex = GKEStartPodOperator(
+ kubernetes_template_ex = GKEStartPodOperator(
task_id="ex-kube-templates",
name="ex-kube-templates",
project_id=PROJECT_ID,
@@ -243,6 +243,6 @@
create_cluster >> create_node_pools >> kubernetes_min_pod >> delete_cluster
create_cluster >> create_node_pools >> kubernetes_full_pod >> delete_cluster
create_cluster >> create_node_pools >> kubernetes_affinity_ex >> delete_cluster
- create_cluster >> create_node_pools >> kubenetes_template_ex >> delete_cluster
+ create_cluster >> create_node_pools >> kubernetes_template_ex >> delete_cluster
# [END composer_gkeoperator_airflow_1]
diff --git a/composer/airflow_1_samples/kubernetes_pod_operator.py b/composer/airflow_1_samples/kubernetes_pod_operator.py
index 2799f467ec9..2062d2cc636 100644
--- a/composer/airflow_1_samples/kubernetes_pod_operator.py
+++ b/composer/airflow_1_samples/kubernetes_pod_operator.py
@@ -97,7 +97,7 @@
)
# [END composer_kubernetespodoperator_minconfig_airflow_1]
# [START composer_kubernetespodoperator_templateconfig_airflow_1]
- kubenetes_template_ex = kubernetes_pod_operator.KubernetesPodOperator(
+ kubernetes_template_ex = kubernetes_pod_operator.KubernetesPodOperator(
task_id="ex-kube-templates",
name="ex-kube-templates",
namespace="default",
diff --git a/composer/cicd_sample/utils/add_dags_to_composer.py b/composer/cicd_sample/utils/add_dags_to_composer.py
index 8e5698f0ba8..7df54ff52fb 100644
--- a/composer/cicd_sample/utils/add_dags_to_composer.py
+++ b/composer/cicd_sample/utils/add_dags_to_composer.py
@@ -57,7 +57,7 @@ def upload_dags_to_composer(
if len(dags) > 0:
# Note - the GCS client library does not currently support batch requests on uploads
# if you have a large number of files, consider using
- # the Python subprocess module to run gsutil -m cp -r on your dags
+ # the Python subprocess module to run gcloud storage cp --recursive on your dags
# See https://cloud.google.com/storage/docs/gsutil/commands/cp for more info
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
diff --git a/composer/workflows/gke_operator.py b/composer/workflows/gke_operator.py
index 31536ba55e7..acf60c05e5a 100644
--- a/composer/workflows/gke_operator.py
+++ b/composer/workflows/gke_operator.py
@@ -91,7 +91,7 @@
# [END composer_gkeoperator_minconfig]
# [START composer_gkeoperator_templateconfig]
- kubenetes_template_ex = GKEStartPodOperator(
+ kubernetes_template_ex = GKEStartPodOperator(
task_id="ex-kube-templates",
name="ex-kube-templates",
project_id=PROJECT_ID,
@@ -238,6 +238,6 @@
create_cluster >> kubernetes_min_pod >> delete_cluster
create_cluster >> kubernetes_full_pod >> delete_cluster
create_cluster >> kubernetes_affinity_ex >> delete_cluster
- create_cluster >> kubenetes_template_ex >> delete_cluster
+ create_cluster >> kubernetes_template_ex >> delete_cluster
# [END composer_gkeoperator]
diff --git a/composer/workflows/kubernetes_pod_operator.py b/composer/workflows/kubernetes_pod_operator.py
index 26dcb9d5173..835bd108fd4 100644
--- a/composer/workflows/kubernetes_pod_operator.py
+++ b/composer/workflows/kubernetes_pod_operator.py
@@ -100,7 +100,7 @@
)
# [END composer_kubernetespodoperator_minconfig]
# [START composer_kubernetespodoperator_templateconfig]
- kubenetes_template_ex = KubernetesPodOperator(
+ kubernetes_template_ex = KubernetesPodOperator(
task_id="ex-kube-templates",
name="ex-kube-templates",
namespace="default",
diff --git a/memorystore/redis/gce_deployment/deploy.sh b/memorystore/redis/gce_deployment/deploy.sh
index 0fa80846da6..4352fdb8bcf 100755
--- a/memorystore/redis/gce_deployment/deploy.sh
+++ b/memorystore/redis/gce_deployment/deploy.sh
@@ -37,7 +37,7 @@ fi
#Upload the tar to GCS
tar -cvf app.tar -C .. requirements.txt main.py
# Copy to GCS bucket
-gsutil cp app.tar gs://"$GCS_BUCKET_NAME"/gce/
+gcloud storage cp app.tar gs://"$GCS_BUCKET_NAME"/gce/
# Create an instance
gcloud compute instances create my-instance \
diff --git a/memorystore/redis/gce_deployment/startup-script.sh b/memorystore/redis/gce_deployment/startup-script.sh
index 3e523246114..5a8e0bb0b09 100644
--- a/memorystore/redis/gce_deployment/startup-script.sh
+++ b/memorystore/redis/gce_deployment/startup-script.sh
@@ -33,7 +33,7 @@ apt-get install -yq \
curl -s "https://storage.googleapis.com/signals-agents/logging/google-fluentd-install.sh" | bash
service google-fluentd restart &
-gsutil cp gs://"$GCS_BUCKET_NAME"/gce/app.tar /app.tar
+gcloud storage cp gs://"$GCS_BUCKET_NAME"/gce/app.tar /app.tar
mkdir -p /app
tar -x -f /app.tar -C /app
cd /app
diff --git a/notebooks/tutorials/cloud-ml-engine/Training and prediction with scikit-learn.ipynb b/notebooks/tutorials/cloud-ml-engine/Training and prediction with scikit-learn.ipynb
index 4db540c6ae1..d805d0cb1fe 100644
--- a/notebooks/tutorials/cloud-ml-engine/Training and prediction with scikit-learn.ipynb
+++ b/notebooks/tutorials/cloud-ml-engine/Training and prediction with scikit-learn.ipynb
@@ -76,7 +76,7 @@
"metadata": {},
"outputs": [],
"source": [
- "!gsutil mb gs://$BUCKET_NAME/"
+ "!gcloud storage buckets create gs://$BUCKET_NAME"
]
},
{
@@ -377,7 +377,7 @@
"metadata": {},
"outputs": [],
"source": [
- "!gsutil ls gs://$BUCKET_NAME/"
+ "!gcloud storage ls gs://$BUCKET_NAME/"
]
},
{
@@ -539,7 +539,7 @@
"!gcloud ai-platform models delete $MODEL_NAME --quiet\n",
"\n",
"# Delete the bucket and contents\n",
- "!gsutil rm -r gs://$BUCKET_NAME\n",
+ "!gcloud storage rm --recursive gs://$BUCKET_NAME\n",
"\n",
"# Delete the local files created by the tutorial\n",
"!rm -rf census_training"
diff --git a/notebooks/tutorials/storage/Storage command-line tool.ipynb b/notebooks/tutorials/storage/Storage command-line tool.ipynb
index 21e62ae8236..eef0054c790 100644
--- a/notebooks/tutorials/storage/Storage command-line tool.ipynb
+++ b/notebooks/tutorials/storage/Storage command-line tool.ipynb
@@ -26,6 +26,7 @@
},
"outputs": [],
"source": [
+    "# NOTE: gsutil help has no direct gcloud storage equivalent; see https://cloud.google.com/storage/docs/discover-object-storage-gcloud for gcloud storage documentation.\n",
"!gsutil help"
]
},
@@ -67,7 +68,7 @@
"metadata": {},
"outputs": [],
"source": [
- "!gsutil mb gs://{bucket_name}/"
+ "!gcloud storage buckets create gs://{bucket_name}/"
]
},
{
@@ -95,7 +96,7 @@
"metadata": {},
"outputs": [],
"source": [
- "!gsutil ls -p $project_id"
+ "!gcloud storage ls --project $project_id"
]
},
{
@@ -128,7 +129,7 @@
},
"outputs": [],
"source": [
- "!gsutil ls -L -b gs://{bucket_name}/"
+ "!gcloud storage ls --full --buckets gs://{bucket_name}/"
]
},
{
@@ -163,7 +164,7 @@
"metadata": {},
"outputs": [],
"source": [
- "!gsutil cp resources/us-states.txt gs://{bucket_name}/"
+ "!gcloud storage cp resources/us-states.txt gs://{bucket_name}/"
]
},
{
@@ -181,7 +182,7 @@
},
"outputs": [],
"source": [
- "!gsutil ls -r gs://{bucket_name}/**"
+ "!gcloud storage ls --recursive gs://{bucket_name}/**"
]
},
{
@@ -209,7 +210,7 @@
"metadata": {},
"outputs": [],
"source": [
- "!gsutil ls -L gs://{bucket_name}/us-states.txt"
+ "!gcloud storage ls --full gs://{bucket_name}/us-states.txt"
]
},
{
@@ -245,7 +246,7 @@
},
"outputs": [],
"source": [
- "!gsutil cp gs://{bucket_name}/us-states.txt resources/downloaded-us-states.txt"
+ "!gcloud storage cp gs://{bucket_name}/us-states.txt resources/downloaded-us-states.txt"
]
},
{
@@ -270,7 +271,7 @@
},
"outputs": [],
"source": [
- "!gsutil rm gs://{bucket_name}/us-states.txt"
+ "!gcloud storage rm gs://{bucket_name}/us-states.txt"
]
},
{
@@ -288,7 +289,7 @@
"metadata": {},
"outputs": [],
"source": [
- "!gsutil rm -r gs://{bucket_name}/"
+ "!gcloud storage rm --recursive gs://{bucket_name}/"
]
},
{
diff --git a/people-and-planet-ai/land-cover-classification/e2e_test.py b/people-and-planet-ai/land-cover-classification/e2e_test.py
index c1c4aeadf9f..04dcc610126 100644
--- a/people-and-planet-ai/land-cover-classification/e2e_test.py
+++ b/people-and-planet-ai/land-cover-classification/e2e_test.py
@@ -57,7 +57,7 @@ def data_path(bucket_name: str) -> str:
def model_path(bucket_name: str) -> str:
# This is a different path than where Vertex AI saves its model.
gcs_path = f"gs://{bucket_name}/pretrained-model"
- conftest.run_cmd("gsutil", "-m", "cp", "-r", "./pretrained-model", gcs_path)
+ conftest.run_cmd("gcloud", "storage", "cp", "--recursive", "./pretrained-model", gcs_path)
return gcs_path
diff --git a/people-and-planet-ai/weather-forecasting/notebooks/2-dataset.ipynb b/people-and-planet-ai/weather-forecasting/notebooks/2-dataset.ipynb
index d4f505d03bc..969a82e30b2 100644
--- a/people-and-planet-ai/weather-forecasting/notebooks/2-dataset.ipynb
+++ b/people-and-planet-ai/weather-forecasting/notebooks/2-dataset.ipynb
@@ -700,8 +700,7 @@
},
"outputs": [],
"source": [
- "!gsutil ls -lh gs://{bucket}/weather/data-small"
- ],
+ "!gcloud storage ls --long --readable-sizes gs://{bucket}/weather/data-small" ],
"id": "F43OAIlrDosG"
},
{
diff --git a/people-and-planet-ai/weather-forecasting/notebooks/3-training.ipynb b/people-and-planet-ai/weather-forecasting/notebooks/3-training.ipynb
index ab637613a91..b8882b1d34d 100644
--- a/people-and-planet-ai/weather-forecasting/notebooks/3-training.ipynb
+++ b/people-and-planet-ai/weather-forecasting/notebooks/3-training.ipynb
@@ -285,7 +285,7 @@
"data_path_gcs = f\"gs://{bucket}/weather/data\"\n",
"\n",
"!mkdir -p data-training\n",
- "!gsutil -m cp {data_path_gcs}/* data-training"
+ "!gcloud storage cp {data_path_gcs}/* data-training"
],
"metadata": {
"id": "h_IUpnqvO-sa"
@@ -1336,7 +1336,7 @@
"cell_type": "code",
"source": [
"# Stage the `weather-model` package in Cloud Storage.\n",
- "!gsutil cp serving/weather-model/dist/weather-model-1.0.0.tar.gz gs://{bucket}/weather/"
+ "!gcloud storage cp serving/weather-model/dist/weather-model-1.0.0.tar.gz gs://{bucket}/weather/"
],
"metadata": {
"id": "JA1k9ky02dsx"
diff --git a/people-and-planet-ai/weather-forecasting/notebooks/4-predictions.ipynb b/people-and-planet-ai/weather-forecasting/notebooks/4-predictions.ipynb
index a2d72385465..405b52a5bd3 100644
--- a/people-and-planet-ai/weather-forecasting/notebooks/4-predictions.ipynb
+++ b/people-and-planet-ai/weather-forecasting/notebooks/4-predictions.ipynb
@@ -336,8 +336,7 @@
"model_path_gcs = f\"gs://{bucket}/weather/model\"\n",
"\n",
"!mkdir -p model\n",
- "!gsutil cp {model_path_gcs}/* model"
- ],
+ "!gcloud storage cp {model_path_gcs}/* model" ],
"metadata": {
"id": "5w_uNjluhDMG"
},
diff --git a/people-and-planet-ai/weather-forecasting/tests/predictions_tests/test_predictions.py b/people-and-planet-ai/weather-forecasting/tests/predictions_tests/test_predictions.py
index 9e3f63d7949..2f4b5b90e3f 100644
--- a/people-and-planet-ai/weather-forecasting/tests/predictions_tests/test_predictions.py
+++ b/people-and-planet-ai/weather-forecasting/tests/predictions_tests/test_predictions.py
@@ -37,7 +37,7 @@ def test_name() -> str:
@pytest.fixture(scope="session")
def model_path_gcs(bucket_name: str) -> str:
path_gcs = f"gs://{bucket_name}/model"
- conftest.run_cmd("gsutil", "cp", "serving/model/*", path_gcs)
+ conftest.run_cmd("gcloud", "storage", "cp", "serving/model/*", path_gcs)
return path_gcs
diff --git a/people-and-planet-ai/weather-forecasting/tests/training_tests/test_training.py b/people-and-planet-ai/weather-forecasting/tests/training_tests/test_training.py
index 1f921794ec6..140aef9758a 100644
--- a/people-and-planet-ai/weather-forecasting/tests/training_tests/test_training.py
+++ b/people-and-planet-ai/weather-forecasting/tests/training_tests/test_training.py
@@ -51,7 +51,7 @@ def data_path_gcs(bucket_name: str) -> str:
inputs_batch = [inputs] * batch_size
labels_batch = [labels] * batch_size
np.savez_compressed(f, inputs=inputs_batch, labels=labels_batch)
- conftest.run_cmd("gsutil", "cp", f.name, f"{path_gcs}/example.npz")
+ conftest.run_cmd("gcloud", "storage", "cp", f.name, f"{path_gcs}/example.npz")
return path_gcs
diff --git a/recaptcha_enterprise/demosite/app/requirements.txt b/recaptcha_enterprise/demosite/app/requirements.txt
index 16c29cd89fa..99a041bbcf9 100644
--- a/recaptcha_enterprise/demosite/app/requirements.txt
+++ b/recaptcha_enterprise/demosite/app/requirements.txt
@@ -1,4 +1,4 @@
-Flask==3.0.3
-gunicorn==23.0.0
-google-cloud-recaptcha-enterprise==1.25.0
-Werkzeug==3.0.3
+Flask==3.1.3
+gunicorn==25.1.0
+google-cloud-recaptcha-enterprise==1.30.0
+Werkzeug==3.1.6
diff --git a/retail/snippets/README.md b/retail/snippets/README.md
new file mode 100644
index 00000000000..7cf3778d6a7
--- /dev/null
+++ b/retail/snippets/README.md
@@ -0,0 +1,26 @@
+# Vertex AI Search for commerce Samples
+
+This directory contains Python samples for [Vertex AI Search for commerce](https://cloud.google.com/retail/docs/search-basic#search).
+
+## Prerequisites
+
+To run these samples, you must have:
+
+1. **A Google Cloud Project** with the [Vertex AI Search for commerce API](https://console.cloud.google.com/apis/library/retail.googleapis.com) enabled.
+2. **Vertex AI Search for commerce** set up with a valid catalog and serving configuration (placement).
+3. **Authentication**: These samples use [Application Default Credentials (ADC)](https://cloud.google.com/docs/authentication/provide-credentials-adc).
+ - If running locally, you can set up ADC by running:
+ ```bash
+ gcloud auth application-default login
+ ```
+4. **IAM Roles**: The service account or user running the samples needs the `roles/retail.viewer` (Retail Viewer) role or higher.
+
+## Samples
+
+- **[search_request.py](search_request.py)**: Basic search request showing both text search and browse search (using categories).
+- **[search_pagination.py](search_pagination.py)**: Shows how to use `next_page_token` to paginate through search results.
+- **[search_offset.py](search_offset.py)**: Shows how to use `offset` to skip a specified number of results.
+
+## Documentation
+
+For more information, see the [Vertex AI Search for commerce documentation](https://cloud.google.com/retail/docs/search-basic#search).
diff --git a/retail/snippets/conftest.py b/retail/snippets/conftest.py
new file mode 100644
index 00000000000..ff8eccf5441
--- /dev/null
+++ b/retail/snippets/conftest.py
@@ -0,0 +1,26 @@
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import pytest
+
+
+@pytest.fixture
+def project_id() -> str:
+ """Get the Google Cloud project ID from the environment."""
+ project_id = os.environ.get("BUILD_SPECIFIC_GCLOUD_PROJECT")
+ if not project_id:
+ project_id = os.environ.get("GOOGLE_CLOUD_PROJECT")
+ return project_id
diff --git a/retail/snippets/requirements-test.txt b/retail/snippets/requirements-test.txt
new file mode 100644
index 00000000000..ad59a67d1f7
--- /dev/null
+++ b/retail/snippets/requirements-test.txt
@@ -0,0 +1,5 @@
+pytest
+pytest-xdist
+mock
+google-cloud-retail>=2.10.0
+google-api-core
diff --git a/retail/snippets/requirements.txt b/retail/snippets/requirements.txt
new file mode 100644
index 00000000000..7c213ef275a
--- /dev/null
+++ b/retail/snippets/requirements.txt
@@ -0,0 +1 @@
+google-cloud-retail>=2.10.0
diff --git a/retail/snippets/search_offset.py b/retail/snippets/search_offset.py
new file mode 100644
index 00000000000..75e19bbfd6d
--- /dev/null
+++ b/retail/snippets/search_offset.py
@@ -0,0 +1,79 @@
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START retail_v2_search_offset]
+import sys
+
+from google.api_core import exceptions
+from google.cloud import retail_v2
+
+client = retail_v2.SearchServiceClient()
+
+
+def search_offset(
+ project_id: str,
+ placement_id: str,
+ visitor_id: str,
+ query: str,
+ offset: int,
+) -> None:
+ """Search for products with an offset using Vertex AI Search for commerce.
+
+ Performs a search request starting from a specified position.
+
+ Args:
+ project_id: The Google Cloud project ID.
+ placement_id: The placement name for the search.
+ visitor_id: A unique identifier for the user.
+ query: The search term.
+ offset: The number of results to skip.
+ """
+ placement_path = client.serving_config_path(
+ project=project_id,
+ location="global",
+ catalog="default_catalog",
+ serving_config=placement_id,
+ )
+
+ branch_path = client.branch_path(
+ project=project_id,
+ location="global",
+ catalog="default_catalog",
+ branch="default_branch",
+ )
+
+ request = retail_v2.SearchRequest(
+ placement=placement_path,
+ branch=branch_path,
+ visitor_id=visitor_id,
+ query=query,
+ page_size=10,
+ offset=offset,
+ )
+
+ try:
+ response = client.search(request=request)
+
+ print(f"--- Results for offset: {offset} ---")
+ for result in response:
+ product = result.product
+ print(f"Product ID: {product.id}")
+ print(f" Title: {product.title}")
+ print(f" Scores: {result.model_scores}")
+
+ except exceptions.GoogleAPICallError as e:
+ print(f"error: {e.message}", file=sys.stderr)
+
+
+# [END retail_v2_search_offset]
diff --git a/retail/snippets/search_offset_test.py b/retail/snippets/search_offset_test.py
new file mode 100644
index 00000000000..d5650891003
--- /dev/null
+++ b/retail/snippets/search_offset_test.py
@@ -0,0 +1,66 @@
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest import mock
+
+from google.cloud import retail_v2
+import pytest
+
+from search_offset import search_offset
+
+
+@pytest.fixture
+def test_config(project_id):
+ return {
+ "project_id": project_id,
+ "placement_id": "default_placement",
+ "visitor_id": "test_visitor",
+ }
+
+
+@mock.patch.object(retail_v2.SearchServiceClient, "search")
+def test_search_offset(mock_search, test_config, capsys):
+ # Mock result
+ mock_product = mock.Mock()
+ mock_product.id = "product_at_offset"
+ mock_product.title = "Offset Title"
+
+ mock_result = mock.Mock()
+ mock_result.product = mock_product
+
+ mock_page = mock.MagicMock()
+ mock_page.results = [mock_result]
+ mock_pager = mock.MagicMock()
+ mock_pager.pages = iter([mock_page])
+ mock_pager.__iter__.return_value = [mock_result]
+ mock_search.return_value = mock_pager
+
+ search_offset(
+ project_id=test_config["project_id"],
+ placement_id=test_config["placement_id"],
+ visitor_id=test_config["visitor_id"],
+ query="test query",
+ offset=10,
+ )
+
+ out, _ = capsys.readouterr()
+ assert "--- Results for offset: 10 ---" in out
+ assert "Product ID: product_at_offset" in out
+
+ # Verify call request
+ args, kwargs = mock_search.call_args
+ request = kwargs.get("request") or args[0]
+ assert request.offset == 10
+ assert request.page_size == 10
+ assert request.query == "test query"
diff --git a/retail/snippets/search_pagination.py b/retail/snippets/search_pagination.py
new file mode 100644
index 00000000000..00d3dfa5605
--- /dev/null
+++ b/retail/snippets/search_pagination.py
@@ -0,0 +1,94 @@
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START retail_v2_search_pagination]
+import sys
+
+from google.api_core import exceptions
+from google.cloud import retail_v2
+
# Module-level client, created once at import time and reused by the snippet.
client = retail_v2.SearchServiceClient()
+
+
def search_pagination(
    project_id: str,
    placement_id: str,
    visitor_id: str,
    query: str,
) -> None:
    """Demonstrate manual pagination of search results.

    Issues an initial search, then fetches one more page using the
    next_page_token returned with the first response.

    Args:
        project_id: The Google Cloud project ID.
        placement_id: The placement name for the search.
        visitor_id: A unique identifier for the user.
        query: The search term.
    """
    serving_config = client.serving_config_path(
        project=project_id,
        location="global",
        catalog="default_catalog",
        serving_config=placement_id,
    )
    branch = client.branch_path(
        project=project_id,
        location="global",
        catalog="default_catalog",
        branch="default_branch",
    )

    def build_request(token: str = "") -> retail_v2.SearchRequest:
        # An empty page_token (the proto default) requests the first page.
        return retail_v2.SearchRequest(
            placement=serving_config,
            branch=branch,
            visitor_id=visitor_id,
            query=query,
            page_size=5,
            page_token=token,
        )

    try:
        response = client.search(request=build_request())
        print("--- First Page ---")
        for result in next(response.pages).results:
            print(f"Product ID: {result.product.id}")

        token = response.next_page_token
        if not token:
            print("\nNo more pages.")
        else:
            # Second page request using page_token
            next_response = client.search(request=build_request(token))
            print("\n--- Second Page ---")
            for result in next(next_response.pages).results:
                print(f"Product ID: {result.product.id}")

    except exceptions.GoogleAPICallError as e:
        print(f"error: {e.message}", file=sys.stderr)
+
+
+# [END retail_v2_search_pagination]
diff --git a/retail/snippets/search_pagination_test.py b/retail/snippets/search_pagination_test.py
new file mode 100644
index 00000000000..8afe99ab50a
--- /dev/null
+++ b/retail/snippets/search_pagination_test.py
@@ -0,0 +1,88 @@
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest import mock
+
+from google.cloud import retail_v2
+import pytest
+
+from search_pagination import search_pagination
+
+
@pytest.fixture
def test_config(project_id):
    """Parameters used by every search_pagination test."""
    return dict(
        project_id=project_id,
        placement_id="default_placement",
        visitor_id="test_visitor",
    )
+
+
@mock.patch.object(retail_v2.SearchServiceClient, "search")
def test_search_pagination(mock_search, test_config, capsys):
    """Two mocked responses exercise the first-page / second-page flow."""
    # Mock first response
    mock_product_1 = mock.Mock()
    mock_product_1.id = "product_1"

    mock_result_1 = mock.Mock()
    mock_result_1.product = mock_product_1

    mock_page_1 = mock.MagicMock()
    mock_page_1.results = [mock_result_1]
    mock_first_response = mock.MagicMock()
    # A non-empty token is what triggers the second request in the snippet.
    mock_first_response.next_page_token = "token_for_page_2"
    mock_first_response.pages = iter([mock_page_1])
    mock_first_response.__iter__.return_value = [mock_result_1]

    # Mock second response
    mock_product_2 = mock.Mock()
    mock_product_2.id = "product_2"

    mock_result_2 = mock.Mock()
    mock_result_2.product = mock_product_2

    mock_page_2 = mock.MagicMock()
    mock_page_2.results = [mock_result_2]
    mock_second_response = mock.MagicMock()
    # An empty token marks the last page.
    mock_second_response.next_page_token = ""
    mock_second_response.pages = iter([mock_page_2])
    mock_second_response.__iter__.return_value = [mock_result_2]

    # side_effect hands out one response per consecutive search() call.
    mock_search.side_effect = [mock_first_response, mock_second_response]

    search_pagination(
        project_id=test_config["project_id"],
        placement_id=test_config["placement_id"],
        visitor_id=test_config["visitor_id"],
        query="test query",
    )

    out, _ = capsys.readouterr()
    assert "--- First Page ---" in out
    assert "Product ID: product_1" in out
    assert "--- Second Page ---" in out
    assert "Product ID: product_2" in out

    # Verify calls
    assert mock_search.call_count == 2

    # Check first call request
    first_call_request = mock_search.call_args_list[0].kwargs["request"]
    assert first_call_request.page_size == 5
    assert not first_call_request.page_token

    # Check second call request
    second_call_request = mock_search.call_args_list[1].kwargs["request"]
    assert second_call_request.page_size == 5
    assert second_call_request.page_token == "token_for_page_2"
diff --git a/retail/snippets/search_request.py b/retail/snippets/search_request.py
new file mode 100644
index 00000000000..80784411fb9
--- /dev/null
+++ b/retail/snippets/search_request.py
@@ -0,0 +1,85 @@
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START retail_v2_search_request]
import sys
from typing import List, Optional

from google.api_core import exceptions
from google.cloud import retail_v2
+
# Module-level client, created once at import time and reused by the snippet.
client = retail_v2.SearchServiceClient()
+
+
def search_request(
    project_id: str,
    placement_id: str,
    visitor_id: str,
    query: str = "",
    page_categories: Optional[List[str]] = None,
) -> None:
    """Search for products using Vertex AI Search for commerce.

    Performs a search request for a specific placement.
    Handles both text search (using query) and browse search (using
    page_categories).

    Args:
        project_id: The Google Cloud project ID.
        placement_id: The placement name for the search.
        visitor_id: A unique identifier for the user.
        query: The search term for text search.
        page_categories: The categories for browse search.
    """
    placement_path = client.serving_config_path(
        project=project_id,
        location="global",
        catalog="default_catalog",
        serving_config=placement_id,
    )

    branch_path = client.branch_path(
        project=project_id,
        location="global",
        catalog="default_catalog",
        branch="default_branch",
    )

    request = retail_v2.SearchRequest(
        placement=placement_path,
        branch=branch_path,
        visitor_id=visitor_id,
        query=query,
        # None (the safe default for a list parameter) becomes an empty list.
        page_categories=page_categories or [],
        page_size=10,
    )

    try:
        response = client.search(request=request)

        for result in response:
            product = result.product
            print(f"Product ID: {product.id}")
            print(f"  Title: {product.title}")
            # Copy the scores map into a plain dict for readable display.
            scores = dict(result.model_scores.items())
            print(f"  Scores: {scores}")

    except exceptions.GoogleAPICallError as e:
        print(f"error: {e.message}", file=sys.stderr)
        print(
            f"Troubleshooting Context: Project: {project_id}, Catalog: default_catalog",
            file=sys.stderr,
        )
+
+
+# [END retail_v2_search_request]
diff --git a/retail/snippets/search_request_test.py b/retail/snippets/search_request_test.py
new file mode 100644
index 00000000000..b1472cdf1a9
--- /dev/null
+++ b/retail/snippets/search_request_test.py
@@ -0,0 +1,121 @@
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest import mock
+
+from google.cloud import retail_v2
+import pytest
+
+from search_request import search_request
+
+
@pytest.fixture
def test_config(project_id):
    """Shared request parameters for the search_request tests."""
    params = {}
    params["project_id"] = project_id
    params["placement_id"] = "default_placement"
    params["visitor_id"] = "test_visitor"
    return params
+
+
@mock.patch.object(retail_v2.SearchServiceClient, "search")
def test_search_request_text(mock_search, test_config, capsys):
    """Text search prints product fields and passes the query in the request."""
    # Mock return value for search call
    mock_product = mock.Mock()
    mock_product.id = "test_product_id"
    mock_product.title = "Test Product Title"

    mock_result = mock.Mock()
    mock_result.product = mock_product
    mock_result.model_scores = {"relevance": 0.95}

    # Fake pager: iterable per-page (.pages) and per-result (__iter__).
    mock_page = mock.MagicMock()
    mock_page.results = [mock_result]
    mock_pager = mock.MagicMock()
    mock_pager.pages = iter([mock_page])
    mock_pager.__iter__.return_value = [mock_result]
    mock_search.return_value = mock_pager

    search_request(
        project_id=test_config["project_id"],
        placement_id=test_config["placement_id"],
        visitor_id=test_config["visitor_id"],
        query="test query",
    )

    out, _ = capsys.readouterr()
    assert "Product ID: test_product_id" in out
    assert "Title: Test Product Title" in out
    assert "Scores: {'relevance': 0.95}" in out

    # Verify that search was called with query
    # search() may have been invoked positionally or with request=.
    args, kwargs = mock_search.call_args
    request = kwargs.get("request") or args[0]
    assert request.query == "test query"
    assert not request.page_categories
+
+
@mock.patch.object(retail_v2.SearchServiceClient, "search")
def test_search_request_browse(mock_search, test_config, capsys):
    """Browse search (no query) passes page_categories in the request."""
    # Mock return value for search call
    mock_product = mock.Mock()
    mock_product.id = "test_browse_id"
    mock_product.title = "Browse Product Title"

    mock_result = mock.Mock()
    mock_result.product = mock_product
    mock_result.model_scores = {"relevance": 0.8}

    # Fake pager: iterable per-page (.pages) and per-result (__iter__).
    mock_page = mock.MagicMock()
    mock_page.results = [mock_result]
    mock_pager = mock.MagicMock()
    mock_pager.pages = iter([mock_page])
    mock_pager.__iter__.return_value = [mock_result]
    mock_search.return_value = mock_pager

    search_request(
        project_id=test_config["project_id"],
        placement_id=test_config["placement_id"],
        visitor_id=test_config["visitor_id"],
        page_categories=["Electronics", "Laptops"],
    )

    out, _ = capsys.readouterr()
    assert "Product ID: test_browse_id" in out
    assert "Title: Browse Product Title" in out
    assert "Scores: {'relevance': 0.8}" in out

    # Verify that search was called with page_categories
    # search() may have been invoked positionally or with request=.
    args, kwargs = mock_search.call_args
    request = kwargs.get("request") or args[0]
    assert not request.query
    assert "Electronics" in request.page_categories
    assert "Laptops" in request.page_categories
+
+
@mock.patch.object(retail_v2.SearchServiceClient, "search")
def test_search_request_error(mock_search, test_config, capsys):
    """API errors are reported on stderr with troubleshooting context."""
    from google.api_core import exceptions

    mock_search.side_effect = exceptions.InvalidArgument("test error")

    search_request(
        project_id=test_config["project_id"],
        placement_id=test_config["placement_id"],
        visitor_id=test_config["visitor_id"],
    )

    _, stderr = capsys.readouterr()
    assert "error: test error" in stderr
    assert f"Project: {test_config['project_id']}" in stderr
diff --git a/run/mcp-server/README.md b/run/mcp-server/README.md
index f4c61795eab..19fe6971155 100644
--- a/run/mcp-server/README.md
+++ b/run/mcp-server/README.md
@@ -6,6 +6,8 @@ This sample uses the `streamable-http` transport, which allows for running MCP
servers remotely. You can read more about MCP transports in the
[official MCP docs](https://modelcontextprotocol.io/docs/concepts/architecture#transport-layer).
+
+
## Benefits of running an MCP server remotely
Running an MCP server remotely on Cloud Run can provide several benefits:
diff --git a/run/mcp-server/pyproject.toml b/run/mcp-server/pyproject.toml
index 32fdf3743bb..8f46df9ef2c 100644
--- a/run/mcp-server/pyproject.toml
+++ b/run/mcp-server/pyproject.toml
@@ -5,5 +5,5 @@ description = "Example of deploying an MCP server on Cloud Run"
readme = "README.md"
requires-python = ">=3.10"
dependencies = [
- "fastmcp==3.0.0",
+ "fastmcp==3.2.0",
]
diff --git a/run/mcp-server/uv.lock b/run/mcp-server/uv.lock
index 517177da22d..184999d5857 100644
--- a/run/mcp-server/uv.lock
+++ b/run/mcp-server/uv.lock
@@ -94,14 +94,24 @@ sdist = { url = "https://files.pythonhosted.org/packages/92/88/b8527e1b00c1811db
wheels = [
{ url = "https://files.pythonhosted.org/packages/6a/80/ea4ead0c5d52a9828692e7df20f0eafe8d26e671ce4883a0a146bb91049e/caio-0.9.25-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ca6c8ecda611478b6016cb94d23fd3eb7124852b985bdec7ecaad9f3116b9619", size = 36836, upload-time = "2025-12-26T15:22:04.662Z" },
{ url = "https://files.pythonhosted.org/packages/17/b9/36715c97c873649d1029001578f901b50250916295e3dddf20c865438865/caio-0.9.25-cp310-cp310-manylinux2010_x86_64.manylinux2014_x86_64.manylinux_2_12_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:db9b5681e4af8176159f0d6598e73b2279bb661e718c7ac23342c550bd78c241", size = 79695, upload-time = "2025-12-26T15:22:18.818Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/ab/07080ecb1adb55a02cbd8ec0126aa8e43af343ffabb6a71125b42670e9a1/caio-0.9.25-cp310-cp310-manylinux_2_34_aarch64.whl", hash = "sha256:bf61d7d0c4fd10ffdd98ca47f7e8db4d7408e74649ffaf4bef40b029ada3c21b", size = 79457, upload-time = "2026-03-04T22:08:16.024Z" },
+ { url = "https://files.pythonhosted.org/packages/88/95/dd55757bb671eb4c376e006c04e83beb413486821f517792ea603ef216e9/caio-0.9.25-cp310-cp310-manylinux_2_34_x86_64.whl", hash = "sha256:ab52e5b643f8bbd64a0605d9412796cd3464cb8ca88593b13e95a0f0b10508ae", size = 77705, upload-time = "2026-03-04T22:08:17.202Z" },
{ url = "https://files.pythonhosted.org/packages/ec/90/543f556fcfcfa270713eef906b6352ab048e1e557afec12925c991dc93c2/caio-0.9.25-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d6956d9e4a27021c8bd6c9677f3a59eb1d820cc32d0343cea7961a03b1371965", size = 36839, upload-time = "2025-12-26T15:21:40.267Z" },
{ url = "https://files.pythonhosted.org/packages/51/3b/36f3e8ec38dafe8de4831decd2e44c69303d2a3892d16ceda42afed44e1b/caio-0.9.25-cp311-cp311-manylinux2010_x86_64.manylinux2014_x86_64.manylinux_2_12_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bf84bfa039f25ad91f4f52944452a5f6f405e8afab4d445450978cd6241d1478", size = 80255, upload-time = "2025-12-26T15:22:20.271Z" },
+ { url = "https://files.pythonhosted.org/packages/df/ce/65e64867d928e6aff1b4f0e12dba0ef6d5bf412c240dc1df9d421ac10573/caio-0.9.25-cp311-cp311-manylinux_2_34_aarch64.whl", hash = "sha256:ae3d62587332bce600f861a8de6256b1014d6485cfd25d68c15caf1611dd1f7c", size = 80052, upload-time = "2026-03-04T22:08:20.402Z" },
+ { url = "https://files.pythonhosted.org/packages/46/90/e278863c47e14ec58309aa2e38a45882fbe67b4cc29ec9bc8f65852d3e45/caio-0.9.25-cp311-cp311-manylinux_2_34_x86_64.whl", hash = "sha256:fc220b8533dcf0f238a6b1a4a937f92024c71e7b10b5a2dfc1c73604a25709bc", size = 78273, upload-time = "2026-03-04T22:08:21.368Z" },
{ url = "https://files.pythonhosted.org/packages/d3/25/79c98ebe12df31548ba4eaf44db11b7cad6b3e7b4203718335620939083c/caio-0.9.25-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:fb7ff95af4c31ad3f03179149aab61097a71fd85e05f89b4786de0359dffd044", size = 36983, upload-time = "2025-12-26T15:21:36.075Z" },
{ url = "https://files.pythonhosted.org/packages/a3/2b/21288691f16d479945968a0a4f2856818c1c5be56881d51d4dac9b255d26/caio-0.9.25-cp312-cp312-manylinux2010_x86_64.manylinux2014_x86_64.manylinux_2_12_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:97084e4e30dfa598449d874c4d8e0c8d5ea17d2f752ef5e48e150ff9d240cd64", size = 82012, upload-time = "2025-12-26T15:22:20.983Z" },
+ { url = "https://files.pythonhosted.org/packages/03/c4/8a1b580875303500a9c12b9e0af58cb82e47f5bcf888c2457742a138273c/caio-0.9.25-cp312-cp312-manylinux_2_34_aarch64.whl", hash = "sha256:4fa69eba47e0f041b9d4f336e2ad40740681c43e686b18b191b6c5f4c5544bfb", size = 81502, upload-time = "2026-03-04T22:08:22.381Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/1c/0fe770b8ffc8362c48134d1592d653a81a3d8748d764bec33864db36319d/caio-0.9.25-cp312-cp312-manylinux_2_34_x86_64.whl", hash = "sha256:6bebf6f079f1341d19f7386db9b8b1f07e8cc15ae13bfdaff573371ba0575d69", size = 80200, upload-time = "2026-03-04T22:08:23.382Z" },
{ url = "https://files.pythonhosted.org/packages/31/57/5e6ff127e6f62c9f15d989560435c642144aa4210882f9494204bc892305/caio-0.9.25-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d6c2a3411af97762a2b03840c3cec2f7f728921ff8adda53d7ea2315a8563451", size = 36979, upload-time = "2025-12-26T15:21:35.484Z" },
{ url = "https://files.pythonhosted.org/packages/a3/9f/f21af50e72117eb528c422d4276cbac11fb941b1b812b182e0a9c70d19c5/caio-0.9.25-cp313-cp313-manylinux2010_x86_64.manylinux2014_x86_64.manylinux_2_12_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0998210a4d5cd5cb565b32ccfe4e53d67303f868a76f212e002a8554692870e6", size = 81900, upload-time = "2025-12-26T15:22:21.919Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/12/c39ae2a4037cb10ad5eb3578eb4d5f8c1a2575c62bba675f3406b7ef0824/caio-0.9.25-cp313-cp313-manylinux_2_34_aarch64.whl", hash = "sha256:1a177d4777141b96f175fe2c37a3d96dec7911ed9ad5f02bac38aaa1c936611f", size = 81523, upload-time = "2026-03-04T22:08:25.187Z" },
+ { url = "https://files.pythonhosted.org/packages/22/59/f8f2e950eb4f1a5a3883e198dca514b9d475415cb6cd7b78b9213a0dd45a/caio-0.9.25-cp313-cp313-manylinux_2_34_x86_64.whl", hash = "sha256:9ed3cfb28c0e99fec5e208c934e5c157d0866aa9c32aa4dc5e9b6034af6286b7", size = 80243, upload-time = "2026-03-04T22:08:26.449Z" },
{ url = "https://files.pythonhosted.org/packages/69/ca/a08fdc7efdcc24e6a6131a93c85be1f204d41c58f474c42b0670af8c016b/caio-0.9.25-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:fab6078b9348e883c80a5e14b382e6ad6aabbc4429ca034e76e730cf464269db", size = 36978, upload-time = "2025-12-26T15:21:41.055Z" },
{ url = "https://files.pythonhosted.org/packages/5e/6c/d4d24f65e690213c097174d26eda6831f45f4734d9d036d81790a27e7b78/caio-0.9.25-cp314-cp314-manylinux2010_x86_64.manylinux2014_x86_64.manylinux_2_12_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:44a6b58e52d488c75cfaa5ecaa404b2b41cc965e6c417e03251e868ecd5b6d77", size = 81832, upload-time = "2025-12-26T15:22:22.757Z" },
+ { url = "https://files.pythonhosted.org/packages/87/a4/e534cf7d2d0e8d880e25dd61e8d921ffcfe15bd696734589826f5a2df727/caio-0.9.25-cp314-cp314-manylinux_2_34_aarch64.whl", hash = "sha256:628a630eb7fb22381dd8e3c8ab7f59e854b9c806639811fc3f4310c6bd711d79", size = 81565, upload-time = "2026-03-04T22:08:27.483Z" },
+ { url = "https://files.pythonhosted.org/packages/3f/ed/bf81aeac1d290017e5e5ac3e880fd56ee15e50a6d0353986799d1bc5cfd5/caio-0.9.25-cp314-cp314-manylinux_2_34_x86_64.whl", hash = "sha256:0ba16aa605ccb174665357fc729cf500679c2d94d5f1458a6f0d5ca48f2060a7", size = 80071, upload-time = "2026-03-04T22:08:28.751Z" },
{ url = "https://files.pythonhosted.org/packages/86/93/1f76c8d1bafe3b0614e06b2195784a3765bbf7b0a067661af9e2dd47fc33/caio-0.9.25-py3-none-any.whl", hash = "sha256:06c0bb02d6b929119b1cfbe1ca403c768b2013a369e2db46bfa2a5761cf82e40", size = 19087, upload-time = "2025-12-26T15:22:00.221Z" },
]
@@ -399,7 +409,7 @@ wheels = [
[[package]]
name = "fastmcp"
-version = "3.0.0"
+version = "3.2.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "authlib" },
@@ -419,13 +429,14 @@ dependencies = [
{ name = "python-dotenv" },
{ name = "pyyaml" },
{ name = "rich" },
+ { name = "uncalled-for" },
{ name = "uvicorn" },
{ name = "watchfiles" },
{ name = "websockets" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/4b/be/beb5d3e485983b9dd122f3f74772bcceeb085ca824e11c52c14ba71cf21a/fastmcp-3.0.0.tar.gz", hash = "sha256:f3b0cfa012f6b2b50b877da181431c6f9a551197f466b0bb7de7f39ceae159a1", size = 16093079, upload-time = "2026-02-18T21:25:34.461Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/d0/32/4f1b2cfd7b50db89114949f90158b1dcc2c92a1917b9f57c0ff24e47a2f4/fastmcp-3.2.0.tar.gz", hash = "sha256:d4830b8ffc3592d3d9c76dc0f398904cf41f04910e41a0de38cc1004e0903bef", size = 26318581, upload-time = "2026-03-30T20:25:37.692Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/12/14/05bebaf3764ea71ce6fa9d3fcf870610bbc8b1e7be2505e870d709375316/fastmcp-3.0.0-py3-none-any.whl", hash = "sha256:561d537cb789f995174c5591f1b54f758ce3f82da3cd951ffe51ce18609569e9", size = 603327, upload-time = "2026-02-18T21:25:36.701Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/67/684fa2d2de1e7504549d4ca457b4f854ccec3cd3be03bd86b33b599fbf58/fastmcp-3.2.0-py3-none-any.whl", hash = "sha256:e71aba3df16f86f546a4a9e513261d3233bcc92bef0dfa647bac3fa33623f681", size = 705550, upload-time = "2026-03-30T20:25:35.499Z" },
]
[[package]]
@@ -655,7 +666,7 @@ dependencies = [
]
[package.metadata]
-requires-dist = [{ name = "fastmcp", specifier = "==3.0.0" }]
+requires-dist = [{ name = "fastmcp", specifier = "==3.2.0" }]
[[package]]
name = "mdurl"
@@ -1348,6 +1359,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" },
]
+[[package]]
+name = "uncalled-for"
+version = "0.2.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/02/7c/b5b7d8136f872e3f13b0584e576886de0489d7213a12de6bebf29ff6ebfc/uncalled_for-0.2.0.tar.gz", hash = "sha256:b4f8fdbcec328c5a113807d653e041c5094473dd4afa7c34599ace69ccb7e69f", size = 49488, upload-time = "2026-02-27T17:40:58.137Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ff/7f/4320d9ce3be404e6310b915c3629fe27bf1e2f438a1a7a3cb0396e32e9a9/uncalled_for-0.2.0-py3-none-any.whl", hash = "sha256:2c0bd338faff5f930918f79e7eb9ff48290df2cb05fcc0b40a7f334e55d4d85f", size = 11351, upload-time = "2026-02-27T17:40:56.804Z" },
+]
+
[[package]]
name = "urllib3"
version = "2.5.0"
diff --git a/storage/samples/AUTHORING_GUIDE.md b/storage/samples/AUTHORING_GUIDE.md
new file mode 100644
index 00000000000..55c97b32f4c
--- /dev/null
+++ b/storage/samples/AUTHORING_GUIDE.md
@@ -0,0 +1 @@
+See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/AUTHORING_GUIDE.md
\ No newline at end of file
diff --git a/storage/samples/CONTRIBUTING.md b/storage/samples/CONTRIBUTING.md
new file mode 100644
index 00000000000..34c882b6f1a
--- /dev/null
+++ b/storage/samples/CONTRIBUTING.md
@@ -0,0 +1 @@
+See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/CONTRIBUTING.md
\ No newline at end of file
diff --git a/storage/samples/snippets/acl_test.py b/storage/samples/snippets/acl_test.py
new file mode 100644
index 00000000000..eecee522b57
--- /dev/null
+++ b/storage/samples/snippets/acl_test.py
@@ -0,0 +1,168 @@
+# Copyright 2016 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import uuid
+
+import backoff
+from google.api_core.exceptions import GoogleAPIError
+from google.cloud import storage
+import pytest
+
+import storage_add_bucket_default_owner
+import storage_add_bucket_owner
+import storage_add_file_owner
+import storage_print_bucket_acl
+import storage_print_bucket_acl_for_user
+import storage_print_file_acl
+import storage_print_file_acl_for_user
+import storage_remove_bucket_default_owner
+import storage_remove_bucket_owner
+import storage_remove_file_owner
+
# Typically we'd use a @example.com address, but GCS requires a real Google
# account. Retrieve a service account email with storage admin permissions.
# (The two adjacent string literals are concatenated at compile time.)
TEST_EMAIL = "py38-storage-test" "@python-docs-samples-tests.iam.gserviceaccount.com"
+
+
@pytest.fixture(scope="module")
def test_bucket():
    """Yields a bucket that is deleted after the test completes.

    Temporarily points GOOGLE_CLOUD_PROJECT at the main test project and
    guarantees the original value is restored even if setup fails.
    """
    # The new projects have uniform bucket-level access and our tests don't
    # pass with those buckets. We need to use the old main project for now.
    original_value = os.environ["GOOGLE_CLOUD_PROJECT"]
    os.environ["GOOGLE_CLOUD_PROJECT"] = os.environ["MAIN_GOOGLE_CLOUD_PROJECT"]
    try:
        bucket = None
        # Retry until an unused bucket name is found.
        while bucket is None or bucket.exists():
            bucket_name = f"acl-test-{uuid.uuid4()}"
            bucket = storage.Client().bucket(bucket_name)
        bucket.create()
        yield bucket
        bucket.delete(force=True)
    finally:
        # Set the value back even if bucket creation raised.
        os.environ["GOOGLE_CLOUD_PROJECT"] = original_value
+
+
@pytest.fixture
def test_blob(test_bucket):
    """Yields a blob in test_bucket (removed when the bucket is deleted)."""
    new_blob = test_bucket.blob(f"storage_acl_test_sigil-{uuid.uuid4()}")
    new_blob.upload_from_string("Hello, is it me you're looking for?")
    yield new_blob
+
+
def test_print_bucket_acl(test_bucket, capsys):
    """Printing a bucket's ACL produces non-empty output."""
    storage_print_bucket_acl.print_bucket_acl(test_bucket.name)
    captured, _ = capsys.readouterr()
    assert captured
+
+
def test_print_bucket_acl_for_user(test_bucket, capsys):
    """After granting OWNER, the per-user ACL printout includes OWNER."""
    acl_entry = test_bucket.acl.user(TEST_EMAIL)
    acl_entry.grant_owner()
    test_bucket.acl.save()

    storage_print_bucket_acl_for_user.print_bucket_acl_for_user(
        test_bucket.name, TEST_EMAIL
    )

    captured, _ = capsys.readouterr()
    assert "OWNER" in captured
+
+
@backoff.on_exception(backoff.expo, GoogleAPIError, max_time=60)
def test_add_bucket_owner(test_bucket):
    """add_bucket_owner grants the OWNER role on the bucket ACL."""
    storage_add_bucket_owner.add_bucket_owner(test_bucket.name, TEST_EMAIL)

    test_bucket.acl.reload()
    roles = test_bucket.acl.user(TEST_EMAIL).get_roles()
    assert "OWNER" in roles
+
+
@backoff.on_exception(backoff.expo, GoogleAPIError, max_time=60)
def test_remove_bucket_owner(test_bucket):
    """remove_bucket_owner revokes a previously granted OWNER role."""
    entry = test_bucket.acl.user(TEST_EMAIL)
    entry.grant_owner()
    test_bucket.acl.save()

    storage_remove_bucket_owner.remove_bucket_owner(test_bucket.name, TEST_EMAIL)

    test_bucket.acl.reload()
    roles = test_bucket.acl.user(TEST_EMAIL).get_roles()
    assert "OWNER" not in roles
+
+
@backoff.on_exception(backoff.expo, GoogleAPIError, max_time=60)
def test_add_bucket_default_owner(test_bucket):
    """add_bucket_default_owner grants OWNER on the default object ACL."""
    storage_add_bucket_default_owner.add_bucket_default_owner(
        test_bucket.name, TEST_EMAIL
    )

    test_bucket.default_object_acl.reload()
    assert "OWNER" in test_bucket.default_object_acl.user(TEST_EMAIL).get_roles()
+
+
@backoff.on_exception(backoff.expo, GoogleAPIError, max_time=60)
def test_remove_bucket_default_owner(test_bucket):
    """remove_bucket_default_owner drops OWNER from the default object ACL."""
    # Grant on the *default object* ACL — the ACL this snippet modifies and
    # the one asserted below — so the removal has something to remove.
    # (Granting on bucket.acl here would make the test pass vacuously.)
    test_bucket.default_object_acl.user(TEST_EMAIL).grant_owner()
    test_bucket.default_object_acl.save()

    storage_remove_bucket_default_owner.remove_bucket_default_owner(
        test_bucket.name, TEST_EMAIL
    )

    test_bucket.default_object_acl.reload()
    roles = test_bucket.default_object_acl.user(TEST_EMAIL).get_roles()
    assert "OWNER" not in roles
+
+
def test_print_blob_acl(test_blob, capsys):
    """Printing a blob's ACL produces non-empty output."""
    storage_print_file_acl.print_blob_acl(test_blob.bucket.name, test_blob.name)
    captured, _ = capsys.readouterr()
    assert captured
+
+
@backoff.on_exception(backoff.expo, GoogleAPIError, max_time=60)
def test_print_blob_acl_for_user(test_blob, capsys):
    """After granting OWNER, the per-user blob ACL printout includes OWNER."""
    test_blob.acl.user(TEST_EMAIL).grant_owner()
    test_blob.acl.save()

    storage_print_file_acl_for_user.print_blob_acl_for_user(
        test_blob.bucket.name, test_blob.name, TEST_EMAIL
    )

    captured, _ = capsys.readouterr()
    assert "OWNER" in captured
+
+
@backoff.on_exception(backoff.expo, GoogleAPIError, max_time=60)
def test_add_blob_owner(test_blob):
    """add_blob_owner grants the OWNER role on the blob ACL."""
    storage_add_file_owner.add_blob_owner(
        test_blob.bucket.name, test_blob.name, TEST_EMAIL
    )

    test_blob.acl.reload()
    roles = test_blob.acl.user(TEST_EMAIL).get_roles()
    assert "OWNER" in roles
+
+
@backoff.on_exception(backoff.expo, GoogleAPIError, max_time=60)
def test_remove_blob_owner(test_blob):
    """remove_blob_owner revokes the OWNER role on the blob ACL."""
    test_blob.acl.user(TEST_EMAIL).grant_owner()
    test_blob.acl.save()

    storage_remove_file_owner.remove_blob_owner(
        test_blob.bucket.name, test_blob.name, TEST_EMAIL
    )

    test_blob.acl.reload()
    roles = test_blob.acl.user(TEST_EMAIL).get_roles()
    assert "OWNER" not in roles
diff --git a/storage/samples/snippets/bucket_lock_test.py b/storage/samples/snippets/bucket_lock_test.py
new file mode 100644
index 00000000000..9b7b4fa2a8e
--- /dev/null
+++ b/storage/samples/snippets/bucket_lock_test.py
@@ -0,0 +1,176 @@
+# Copyright 2018 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+import uuid
+
+from google.cloud import storage
+import pytest
+
+import storage_disable_default_event_based_hold
+import storage_enable_default_event_based_hold
+import storage_get_default_event_based_hold
+import storage_get_retention_policy
+import storage_lock_retention_policy
+import storage_release_event_based_hold
+import storage_release_temporary_hold
+import storage_remove_retention_policy
+import storage_set_event_based_hold
+import storage_set_retention_policy
+import storage_set_temporary_hold
+
+
+BLOB_NAME = "storage_snippets_test_sigil"
+BLOB_CONTENT = "Hello, is it me you're looking for?"
+# Retention policy for 5 seconds
+RETENTION_POLICY = 5
+
+
+@pytest.fixture
+def bucket():
+    """Yields a bucket that is deleted after the test completes."""
+    bucket = None
+    # Retry until an unused bucket name is found (guards against name
+    # collisions with an existing bucket).
+    while bucket is None or bucket.exists():
+        bucket_name = f"bucket-lock-{uuid.uuid4()}"
+        bucket = storage.Client().bucket(bucket_name)
+    bucket.create()
+    yield bucket
+    bucket.delete(force=True)
+
+
+def test_retention_policy_no_lock(bucket, capsys):
+ storage_set_retention_policy.set_retention_policy(
+ bucket.name, RETENTION_POLICY
+ )
+ bucket.reload()
+
+ assert bucket.retention_period is RETENTION_POLICY
+ assert bucket.retention_policy_effective_time is not None
+ assert bucket.retention_policy_locked is None
+
+ storage_get_retention_policy.get_retention_policy(bucket.name)
+ out, _ = capsys.readouterr()
+ assert f"Retention Policy for {bucket.name}" in out
+ assert "Retention Period: 5" in out
+ assert "Effective Time: " in out
+ assert "Retention Policy is locked" not in out
+
+ blob = bucket.blob(BLOB_NAME)
+ blob.upload_from_string(BLOB_CONTENT)
+
+ assert blob.retention_expiration_time is not None
+
+ storage_remove_retention_policy.remove_retention_policy(bucket.name)
+ bucket.reload()
+ assert bucket.retention_period is None
+
+ time.sleep(RETENTION_POLICY)
+
+
+def test_retention_policy_lock(bucket, capsys):
+    """Locks the bucket's retention policy and verifies the locked state."""
+    storage_set_retention_policy.set_retention_policy(
+        bucket.name, RETENTION_POLICY
+    )
+    bucket.reload()
+    assert bucket.retention_policy_locked is None
+
+    storage_lock_retention_policy.lock_retention_policy(bucket.name)
+    bucket.reload()
+    assert bucket.retention_policy_locked is True
+
+    storage_get_retention_policy.get_retention_policy(bucket.name)
+    out, _ = capsys.readouterr()
+    assert "Retention Policy is locked" in out
+
+
+def test_enable_disable_bucket_default_event_based_hold(bucket, capsys):
+    """End-to-end check of the default event-based hold samples: report
+    the initial state, enable the hold, observe it applied to a new blob,
+    release the blob's hold, and disable the default again."""
+    storage_get_default_event_based_hold.get_default_event_based_hold(
+        bucket.name
+    )
+    out, _ = capsys.readouterr()
+    assert (
+        f"Default event-based hold is not enabled for {bucket.name}"
+        in out
+    )
+    assert (
+        f"Default event-based hold is enabled for {bucket.name}"
+        not in out
+    )
+
+    storage_enable_default_event_based_hold.enable_default_event_based_hold(
+        bucket.name
+    )
+    bucket.reload()
+
+    assert bucket.default_event_based_hold is True
+
+    storage_get_default_event_based_hold.get_default_event_based_hold(
+        bucket.name
+    )
+    out, _ = capsys.readouterr()
+    assert (
+        f"Default event-based hold is enabled for {bucket.name}" in out
+    )
+
+    # Changes to the bucket will be readable immediately after writing,
+    # but configuration changes may take time to propagate.
+    time.sleep(10)
+
+    blob = bucket.blob(BLOB_NAME)
+    blob.upload_from_string(BLOB_CONTENT)
+    assert blob.event_based_hold is True
+
+    storage_release_event_based_hold.release_event_based_hold(
+        bucket.name, blob.name
+    )
+    blob.reload()
+    assert blob.event_based_hold is False
+
+    storage_disable_default_event_based_hold.disable_default_event_based_hold(
+        bucket.name
+    )
+    bucket.reload()
+    assert bucket.default_event_based_hold is False
+
+
+def test_enable_disable_temporary_hold(bucket):
+    """Sets, then releases, a temporary hold on a freshly uploaded blob."""
+    blob = bucket.blob(BLOB_NAME)
+    blob.upload_from_string(BLOB_CONTENT)
+    assert blob.temporary_hold is None
+
+    storage_set_temporary_hold.set_temporary_hold(bucket.name, blob.name)
+    blob.reload()
+    assert blob.temporary_hold is True
+
+    storage_release_temporary_hold.release_temporary_hold(
+        bucket.name, blob.name
+    )
+    blob.reload()
+    assert blob.temporary_hold is False
+
+
+def test_enable_disable_event_based_hold(bucket):
+    """Sets, then releases, an event-based hold on a freshly uploaded blob."""
+    blob = bucket.blob(BLOB_NAME)
+    blob.upload_from_string(BLOB_CONTENT)
+    assert blob.event_based_hold is None
+
+    storage_set_event_based_hold.set_event_based_hold(bucket.name, blob.name)
+    blob.reload()
+    assert blob.event_based_hold is True
+
+    storage_release_event_based_hold.release_event_based_hold(
+        bucket.name, blob.name
+    )
+    blob.reload()
+    assert blob.event_based_hold is False
diff --git a/storage/samples/snippets/conftest.py b/storage/samples/snippets/conftest.py
new file mode 100644
index 00000000000..b0db57561d8
--- /dev/null
+++ b/storage/samples/snippets/conftest.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import time
+import uuid
+
+from google.cloud import storage
+import pytest
+
+
+@pytest.fixture(scope="function")
+def bucket():
+    """Yields a bucket that is deleted after the test completes."""
+    # The new projects enforce uniform bucket-level access, so
+    # we need to use the old main project for now.
+    original_value = os.environ['GOOGLE_CLOUD_PROJECT']
+    os.environ['GOOGLE_CLOUD_PROJECT'] = os.environ['MAIN_GOOGLE_CLOUD_PROJECT']
+    bucket = None
+    # Retry until an unused bucket name is found.
+    while bucket is None or bucket.exists():
+        bucket_name = f"uniform-bucket-level-access-{uuid.uuid4().hex}"
+        bucket = storage.Client().bucket(bucket_name)
+    bucket.create()
+    yield bucket
+    time.sleep(3)
+    bucket.delete(force=True)
+    # Restore the original project so other tests are unaffected.
+    os.environ['GOOGLE_CLOUD_PROJECT'] = original_value
diff --git a/storage/samples/snippets/encryption_test.py b/storage/samples/snippets/encryption_test.py
new file mode 100644
index 00000000000..f4d857dd88e
--- /dev/null
+++ b/storage/samples/snippets/encryption_test.py
@@ -0,0 +1,231 @@
+# Copyright 2016 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import os
+import tempfile
+import uuid
+
+from google.api_core.exceptions import NotFound
+from google.cloud import storage
+from google.cloud.storage import Blob
+import pytest
+
+import storage_download_encrypted_file
+import storage_generate_encryption_key
+import storage_object_csek_to_cmek
+import storage_rotate_encryption_key
+import storage_upload_encrypted_file
+import storage_get_bucket_encryption_enforcement_config
+import storage_set_bucket_encryption_enforcement_config
+import storage_update_bucket_encryption_enforcement_config
+from google.cloud.storage.bucket import EncryptionEnforcementConfig
+
+BUCKET = os.environ["CLOUD_STORAGE_BUCKET"]
+KMS_KEY = os.environ["MAIN_CLOUD_KMS_KEY"]
+
+TEST_ENCRYPTION_KEY = "brtJUWneL92g5q0N2gyDSnlPSYAiIVZ/cWgjyZNeMy0="
+TEST_ENCRYPTION_KEY_DECODED = base64.b64decode(TEST_ENCRYPTION_KEY)
+
+TEST_ENCRYPTION_KEY_2 = "o4OD7SWCaPjfeEGhAY+YCgMdY9UW+OJ8mvfWD9lNtO4="
+TEST_ENCRYPTION_KEY_2_DECODED = base64.b64decode(TEST_ENCRYPTION_KEY_2)
+
+
+def test_generate_encryption_key(capsys):
+    """Verifies the sample prints a base64-encoded 32-byte (256-bit) key."""
+    storage_generate_encryption_key.generate_encryption_key()
+    out, _ = capsys.readouterr()
+    # Take the text after the first colon of the printed line.
+    encoded_key = out.split(":", 1).pop().strip()
+    key = base64.b64decode(encoded_key)
+    assert len(key) == 32, "Returned key should be 32 bytes"
+
+
+def test_upload_encrypted_blob():
+ blob_name = f"test_upload_encrypted_{uuid.uuid4().hex}"
+ with tempfile.NamedTemporaryFile() as source_file:
+ source_file.write(b"test")
+
+ storage_upload_encrypted_file.upload_encrypted_blob(
+ BUCKET,
+ source_file.name,
+ blob_name,
+ TEST_ENCRYPTION_KEY,
+ )
+ bucket = storage.Client().bucket(BUCKET)
+ bucket.delete_blob(blob_name)
+
+
+@pytest.fixture(scope="module")
+def test_blob():
+    """Provides a pre-existing CSEK-encrypted blob in the test bucket.
+
+    Yields (blob_name, content). Module-scoped, so test order matters:
+    later tests may re-encrypt the blob with TEST_ENCRYPTION_KEY_2.
+    """
+    bucket = storage.Client().bucket(BUCKET)
+    blob_name = f"test_blob_{uuid.uuid4().hex}"
+    blob = Blob(
+        blob_name,
+        bucket,
+        encryption_key=TEST_ENCRYPTION_KEY_DECODED,
+    )
+    content = "Hello, is it me you're looking for?"
+    blob.upload_from_string(content)
+
+    yield blob.name, content
+
+    # To delete an encrypted blob, you have to provide the same key
+    # used for the blob. When you provide a wrong key, you'll get
+    # NotFound.
+    try:
+        # Clean up for the case that the rotation didn't occur.
+        blob.delete()
+    except NotFound as e:
+        # For the case that the rotation succeeded.
+        print(f"Ignoring 404, detail: {e}")
+        blob = Blob(blob_name, bucket, encryption_key=TEST_ENCRYPTION_KEY_2_DECODED)
+        blob.delete()
+
+
+def test_download_blob(test_blob):
+    """Downloads the CSEK blob with the original key and checks content."""
+    test_blob_name, test_blob_content = test_blob
+    with tempfile.NamedTemporaryFile() as dest_file:
+        storage_download_encrypted_file.download_encrypted_blob(
+            BUCKET, test_blob_name, dest_file.name, TEST_ENCRYPTION_KEY
+        )
+
+        downloaded_content = dest_file.read().decode("utf-8")
+        assert downloaded_content == test_blob_content
+
+
+def test_rotate_encryption_key(test_blob):
+    """Rotates the blob's CSEK from key 1 to key 2 and re-downloads with
+    the new key to confirm the content survived the rotation."""
+    test_blob_name, test_blob_content = test_blob
+    storage_rotate_encryption_key.rotate_encryption_key(
+        BUCKET, test_blob_name, TEST_ENCRYPTION_KEY, TEST_ENCRYPTION_KEY_2
+    )
+
+    with tempfile.NamedTemporaryFile() as dest_file:
+        storage_download_encrypted_file.download_encrypted_blob(
+            BUCKET, test_blob_name, dest_file.name, TEST_ENCRYPTION_KEY_2
+        )
+
+        downloaded_content = dest_file.read().decode("utf-8")
+        assert downloaded_content == test_blob_content
+
+
+def test_object_csek_to_cmek(test_blob):
+ test_blob_name, test_blob_content = test_blob
+ cmek_blob = storage_object_csek_to_cmek.object_csek_to_cmek(
+ BUCKET, test_blob_name, TEST_ENCRYPTION_KEY_2, KMS_KEY
+ )
+
+ assert cmek_blob.download_as_bytes(), test_blob_content
+
+
+@pytest.fixture
+def enforcement_bucket():
+    """Yields a unique bucket name; deletes the bucket, if any, afterwards."""
+    bucket_name = f"test_encryption_enforcement_{uuid.uuid4().hex}"
+    yield bucket_name
+
+    storage_client = storage.Client()
+    try:
+        bucket = storage_client.get_bucket(bucket_name)
+        bucket.delete(force=True)
+    except Exception:
+        # Best-effort cleanup: the test may never have created the bucket.
+        pass
+
+
+def create_enforcement_bucket(bucket_name):
+    """Sets up a bucket with GMEK AND CSEK Restricted"""
+    client = storage.Client()
+    bucket = client.bucket(bucket_name)
+
+    bucket.encryption.google_managed_encryption_enforcement_config = (
+        EncryptionEnforcementConfig(restriction_mode="FullyRestricted")
+    )
+    bucket.encryption.customer_managed_encryption_enforcement_config = (
+        EncryptionEnforcementConfig(restriction_mode="NotRestricted")
+    )
+    bucket.encryption.customer_supplied_encryption_enforcement_config = (
+        EncryptionEnforcementConfig(restriction_mode="FullyRestricted")
+    )
+
+    bucket.create()
+    return bucket
+
+
+def test_set_bucket_encryption_enforcement_config(enforcement_bucket):
+    """Verifies the set sample creates a bucket with the expected
+    restriction modes for all three encryption enforcement configs."""
+    storage_set_bucket_encryption_enforcement_config.set_bucket_encryption_enforcement_config(
+        enforcement_bucket
+    )
+
+    storage_client = storage.Client()
+    bucket = storage_client.get_bucket(enforcement_bucket)
+
+    assert (
+        bucket.encryption.google_managed_encryption_enforcement_config.restriction_mode
+        == "FullyRestricted"
+    )
+    assert (
+        bucket.encryption.customer_managed_encryption_enforcement_config.restriction_mode
+        == "NotRestricted"
+    )
+    assert (
+        bucket.encryption.customer_supplied_encryption_enforcement_config.restriction_mode
+        == "FullyRestricted"
+    )
+
+
+def test_get_bucket_encryption_enforcement_config(enforcement_bucket, capsys):
+    """Verifies the get sample prints each config's restriction mode."""
+    # Pre-setup: Creating a bucket
+    create_enforcement_bucket(enforcement_bucket)
+
+    storage_get_bucket_encryption_enforcement_config.get_bucket_encryption_enforcement_config(
+        enforcement_bucket
+    )
+
+    out, _ = capsys.readouterr()
+    assert f"Encryption Enforcement Config for bucket {enforcement_bucket}" in out
+    assert (
+        "Customer-managed encryption enforcement config restriction mode: NotRestricted"
+        in out
+    )
+    assert (
+        "Customer-supplied encryption enforcement config restriction mode: FullyRestricted"
+        in out
+    )
+    assert (
+        "Google-managed encryption enforcement config restriction mode: FullyRestricted"
+        in out
+    )
+
+
+def test_update_encryption_enforcement_config(enforcement_bucket):
+    """Verifies the update sample flips the GMEK/CMEK restriction modes."""
+    # Pre-setup: Create a bucket in a different state before update
+    create_enforcement_bucket(enforcement_bucket)
+
+    storage_update_bucket_encryption_enforcement_config.update_bucket_encryption_enforcement_config(
+        enforcement_bucket
+    )
+
+    storage_client = storage.Client()
+    bucket = storage_client.get_bucket(enforcement_bucket)
+
+    assert (
+        bucket.encryption.google_managed_encryption_enforcement_config.restriction_mode
+        == "NotRestricted"
+    )
+    assert (
+        bucket.encryption.customer_managed_encryption_enforcement_config.restriction_mode
+        == "FullyRestricted"
+    )
+    assert (
+        bucket.encryption.customer_supplied_encryption_enforcement_config.restriction_mode
+        == "FullyRestricted"
+    )
diff --git a/storage/samples/snippets/fileio_test.py b/storage/samples/snippets/fileio_test.py
new file mode 100644
index 00000000000..b8a4b8272f4
--- /dev/null
+++ b/storage/samples/snippets/fileio_test.py
@@ -0,0 +1,35 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import uuid
+
+import storage_fileio_pandas
+import storage_fileio_write_read
+
+
+def test_fileio_write_read(bucket, capsys):
+    """Verifies the file-like write/read sample round-trips its content."""
+    blob_name = f"test-fileio-{uuid.uuid4()}"
+    storage_fileio_write_read.write_read(bucket.name, blob_name)
+    out, _ = capsys.readouterr()
+    assert "Hello world" in out
+
+
+def test_fileio_pandas(bucket, capsys):
+    """Verifies the pandas CSV write and read samples report success."""
+    blob_name = f"test-fileio-{uuid.uuid4()}"
+    storage_fileio_pandas.pandas_write(bucket.name, blob_name)
+    out, _ = capsys.readouterr()
+    assert f"Wrote csv with pandas with name {blob_name} from bucket {bucket.name}." in out
+    storage_fileio_pandas.pandas_read(bucket.name, blob_name)
+    out, _ = capsys.readouterr()
+    assert f"Read csv with pandas with name {blob_name} from bucket {bucket.name}." in out
diff --git a/storage/samples/snippets/hmac_samples_test.py b/storage/samples/snippets/hmac_samples_test.py
new file mode 100644
index 00000000000..fbc2e292df6
--- /dev/null
+++ b/storage/samples/snippets/hmac_samples_test.py
@@ -0,0 +1,139 @@
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Tests for hmac.py. Requires GOOGLE_CLOUD_PROJECT (valid project) and
+HMAC_KEY_TEST_SERVICE_ACCOUNT (valid service account email) env variables to be
+set in order to run.
+"""
+
+import os
+
+import google.api_core.exceptions
+from google.cloud import storage
+import pytest
+
+import storage_activate_hmac_key
+import storage_create_hmac_key
+import storage_deactivate_hmac_key
+import storage_delete_hmac_key
+import storage_get_hmac_key
+import storage_list_hmac_keys
+
+# We are reaching the maximum number of HMAC keys on the service account.
+# We change the service account based on the value of
+# RUN_TESTS_SESSION in noxfile_config.py.
+# The reason we cannot use multiple projects is that our new projects
+# enforce the
+# 'constraints/iam.disableServiceAccountKeyCreation' policy.
+
+PROJECT_ID = os.environ["MAIN_GOOGLE_CLOUD_PROJECT"]
+SERVICE_ACCOUNT_EMAIL = os.environ["HMAC_KEY_TEST_SERVICE_ACCOUNT"]
+STORAGE_CLIENT = storage.Client(project=PROJECT_ID)
+
+
+@pytest.fixture(scope="module")
+def new_hmac_key():
+    """
+    Fixture to create a new HMAC key, and to guarantee all keys are deleted at
+    the end of the module.
+
+    NOTE: Due to the module scope, test order in this file is significant
+    """
+    try:
+        hmac_key, secret = STORAGE_CLIENT.create_hmac_key(
+            service_account_email=SERVICE_ACCOUNT_EMAIL, project_id=PROJECT_ID
+        )
+    except google.api_core.exceptions.PreconditionFailed as e:
+        # Check if the failure is due to the Organization Policy constraint
+        if "constraints/iam.disableServiceAccountKeyCreation" in str(e):
+            pytest.skip(
+                "Temporary skip: HMAC key creation is disabled by organization policy "
+                "on project python-docs-samples-tests. See b/493225655."
+            )
+        raise
+    yield hmac_key
+    # Re-fetch the key metadata in case state has changed during the test.
+    hmac_key = STORAGE_CLIENT.get_hmac_key_metadata(
+        hmac_key.access_id, project_id=PROJECT_ID
+    )
+    if hmac_key.state == "DELETED":
+        return
+    # A key must be INACTIVE before it can be deleted.
+    if not hmac_key.state == "INACTIVE":
+        hmac_key.state = "INACTIVE"
+        hmac_key.update()
+    try:
+        hmac_key.delete()
+    except google.api_core.exceptions.BadRequest:
+        # Best-effort cleanup; deletion can fail if the key state changed.
+        pass
+
+
+def test_list_keys(capsys, new_hmac_key):
+    """Verifies the list sample prints a header and returns >= 1 key."""
+    hmac_keys = storage_list_hmac_keys.list_keys(PROJECT_ID)
+    assert "HMAC Keys:" in capsys.readouterr().out
+    assert hmac_keys.num_results >= 1
+
+
+def test_create_key(capsys):
+    """Verifies the create sample returns a key with an access ID."""
+    try:
+        hmac_key = storage_create_hmac_key.create_key(PROJECT_ID, SERVICE_ACCOUNT_EMAIL)
+    except google.api_core.exceptions.PreconditionFailed as e:
+        if "constraints/iam.disableServiceAccountKeyCreation" in str(e):
+            pytest.skip(
+                "Temporary skip: HMAC key creation is disabled by organization policy "
+                "on project python-docs-samples-tests. See b/493225655."
+            )
+        raise
+
+    # Deactivate then delete immediately so the key quota is not consumed.
+    hmac_key.state = "INACTIVE"
+    hmac_key.update()
+    hmac_key.delete()
+    assert "Key ID:" in capsys.readouterr().out
+    assert hmac_key.access_id
+
+
+def test_get_key(capsys, new_hmac_key):
+    """Verifies the get sample returns the fixture key's metadata."""
+    hmac_key = storage_get_hmac_key.get_key(new_hmac_key.access_id, PROJECT_ID)
+    assert "HMAC key metadata" in capsys.readouterr().out
+    assert hmac_key.access_id == new_hmac_key.access_id
+
+
+def test_activate_key(capsys, new_hmac_key):
+    """Deactivates the fixture key, then verifies the sample reactivates it."""
+    new_hmac_key.state = "INACTIVE"
+    new_hmac_key.update()
+    hmac_key = storage_activate_hmac_key.activate_key(
+        new_hmac_key.access_id, PROJECT_ID
+    )
+    assert "State: ACTIVE" in capsys.readouterr().out
+    assert hmac_key.state == "ACTIVE"
+
+
+def test_deactivate_key(capsys, new_hmac_key):
+    """Verifies the sample deactivates the fixture key."""
+    hmac_key = storage_deactivate_hmac_key.deactivate_key(
+        new_hmac_key.access_id, PROJECT_ID
+    )
+    assert "State: INACTIVE" in capsys.readouterr().out
+    assert hmac_key.state == "INACTIVE"
+
+
+def test_delete_key(capsys, new_hmac_key):
+    """Verifies the sample deletes the (inactive) fixture key."""
+    # Due to reuse of the HMAC key for each test function, the previous
+    # test has deactivated the key already.
+    try:
+        new_hmac_key.state = "INACTIVE"
+        new_hmac_key.update()
+    except google.api_core.exceptions.BadRequest:
+        pass
+
+    storage_delete_hmac_key.delete_key(new_hmac_key.access_id, PROJECT_ID)
+    assert "The key is deleted" in capsys.readouterr().out
diff --git a/storage/samples/snippets/iam_test.py b/storage/samples/snippets/iam_test.py
new file mode 100644
index 00000000000..7700b6c6a8a
--- /dev/null
+++ b/storage/samples/snippets/iam_test.py
@@ -0,0 +1,149 @@
+# Copyright 2017 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+import time
+import uuid
+
+from google.cloud import storage
+import pytest
+
+import storage_add_bucket_conditional_iam_binding
+import storage_add_bucket_iam_member
+import storage_remove_bucket_conditional_iam_binding
+import storage_remove_bucket_iam_member
+import storage_set_bucket_public_iam
+import storage_view_bucket_iam_members
+
+MEMBER = "group:dpebot@google.com"
+ROLE = "roles/storage.legacyBucketReader"
+
+CONDITION_TITLE = "match-prefix"
+CONDITION_DESCRIPTION = "Applies to objects matching a prefix"
+CONDITION_EXPRESSION = (
+ 'resource.name.startsWith("projects/_/buckets/bucket-name/objects/prefix-a-")'
+)
+
+
+@pytest.fixture(scope="module")
+def bucket():
+    """Yields a uniform-bucket-level-access bucket, deleted after the module."""
+    bucket = None
+    # Retry until an unused bucket name is found.
+    while bucket is None or bucket.exists():
+        storage_client = storage.Client()
+        bucket_name = f"test-iam-{uuid.uuid4()}"
+        bucket = storage_client.bucket(bucket_name)
+        bucket.iam_configuration.uniform_bucket_level_access_enabled = True
+    storage_client.create_bucket(bucket)
+    yield bucket
+    time.sleep(3)
+    bucket.delete(force=True)
+
+
+@pytest.fixture(scope="function")
+def public_bucket():
+    """Yields a bucket in the main project, which permits public access."""
+    # The new projects don't allow making a bucket publicly accessible, so
+    # we need to use the old main project for now.
+    original_value = os.environ['GOOGLE_CLOUD_PROJECT']
+    os.environ['GOOGLE_CLOUD_PROJECT'] = os.environ['MAIN_GOOGLE_CLOUD_PROJECT']
+    bucket = None
+    # Retry until an unused bucket name is found.
+    while bucket is None or bucket.exists():
+        storage_client = storage.Client()
+        bucket_name = f"test-iam-{uuid.uuid4()}"
+        bucket = storage_client.bucket(bucket_name)
+        bucket.iam_configuration.uniform_bucket_level_access_enabled = True
+    storage_client.create_bucket(bucket)
+    yield bucket
+    time.sleep(3)
+    bucket.delete(force=True)
+    # Restore the original project so other tests are unaffected.
+    os.environ['GOOGLE_CLOUD_PROJECT'] = original_value
+
+
+def test_view_bucket_iam_members(capsys, bucket):
+    """Verifies the view sample prints role/member lines."""
+    storage_view_bucket_iam_members.view_bucket_iam_members(bucket.name)
+    assert re.match("Role: .*, Members: .*", capsys.readouterr().out)
+
+
+def test_add_bucket_iam_member(bucket):
+    """Verifies the sample adds MEMBER under ROLE in the bucket policy."""
+    storage_add_bucket_iam_member.add_bucket_iam_member(bucket.name, ROLE, MEMBER)
+    policy = bucket.get_iam_policy(requested_policy_version=3)
+    assert any(
+        binding["role"] == ROLE and MEMBER in binding["members"]
+        for binding in policy.bindings
+    )
+
+
+def test_add_bucket_conditional_iam_binding(bucket):
+    """Verifies the sample adds a binding carrying the expected condition."""
+    storage_add_bucket_conditional_iam_binding.add_bucket_conditional_iam_binding(
+        bucket.name,
+        ROLE,
+        CONDITION_TITLE,
+        CONDITION_DESCRIPTION,
+        CONDITION_EXPRESSION,
+        {MEMBER},
+    )
+    policy = bucket.get_iam_policy(requested_policy_version=3)
+    assert any(
+        binding["role"] == ROLE
+        and binding["members"] == {MEMBER}
+        and binding["condition"]
+        == {
+            "title": CONDITION_TITLE,
+            "description": CONDITION_DESCRIPTION,
+            "expression": CONDITION_EXPRESSION,
+        }
+        for binding in policy.bindings
+    )
+
+
+def test_remove_bucket_iam_member(public_bucket):
+    """Verifies the sample removes MEMBER from ROLE in the bucket policy."""
+    storage_remove_bucket_iam_member.remove_bucket_iam_member(
+        public_bucket.name, ROLE, MEMBER)
+
+    policy = public_bucket.get_iam_policy(requested_policy_version=3)
+    assert not any(
+        binding["role"] == ROLE and MEMBER in binding["members"]
+        for binding in policy.bindings
+    )
+
+
+def test_remove_bucket_conditional_iam_binding(bucket):
+    """Verifies the sample removes the binding matching the condition."""
+    storage_remove_bucket_conditional_iam_binding.remove_bucket_conditional_iam_binding(
+        bucket.name, ROLE, CONDITION_TITLE, CONDITION_DESCRIPTION, CONDITION_EXPRESSION
+    )
+
+    policy = bucket.get_iam_policy(requested_policy_version=3)
+    condition = {
+        "title": CONDITION_TITLE,
+        "description": CONDITION_DESCRIPTION,
+        "expression": CONDITION_EXPRESSION,
+    }
+    assert not any(
+        (binding["role"] == ROLE and binding.get("condition") == condition)
+        for binding in policy.bindings
+    )
+
+
+def test_set_bucket_public_iam(public_bucket):
+    """Verifies the sample grants objectViewer to the given members."""
+    # The test project has org policy restricting identities by domain.
+    # Testing "domain:google.com" instead of "allUsers"
+    storage_set_bucket_public_iam.set_bucket_public_iam(public_bucket.name, ["domain:google.com"])
+    policy = public_bucket.get_iam_policy(requested_policy_version=3)
+
+    assert any(
+        binding["role"] == "roles/storage.objectViewer"
+        and "domain:google.com" in binding["members"]
+        for binding in policy.bindings
+    )
diff --git a/storage/samples/snippets/notification_polling.py b/storage/samples/snippets/notification_polling.py
new file mode 100644
index 00000000000..1359c9cfa19
--- /dev/null
+++ b/storage/samples/snippets/notification_polling.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+
+# Copyright 2017 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This application demonstrates how to poll for GCS notifications from a
+Cloud Pub/Sub subscription, parse the incoming message, and acknowledge the
+successful processing of the message.
+
+This application will work with any subscription configured for pull rather
+than push notifications. If you do not already have notifications configured,
+you may consult the docs at
+https://cloud.google.com/storage/docs/reporting-changes or follow the steps
+below:
+
+1. First, follow the common setup steps for these snippets, specifically
+ configuring auth and installing dependencies. See the README's "Setup"
+ section.
+
+2. Activate the Google Cloud Pub/Sub API, if you have not already done so.
+ https://console.cloud.google.com/flows/enableapi?apiid=pubsub
+
+3. Create a Google Cloud Storage bucket:
+ $ gcloud storage buckets create gs://testbucket
+
+4. Create a Cloud Pub/Sub topic and publish bucket notifications there:
+ $ gcloud storage buckets notifications create gs://testbucket --topic=testtopic --payload-format=json
+
+5. Create a subscription for your new topic:
+ $ gcloud pubsub subscriptions create testsubscription --topic=testtopic
+
+6. Run this program:
+ $ python notification_polling.py my-project-id testsubscription
+
+7. While the program is running, upload and delete some files in the testbucket
+ bucket (you could use the console or gsutil) and watch as changes scroll by
+ in the app.
+"""
+
+import argparse
+import json
+import time
+
+from google.cloud import pubsub_v1
+
+
+def summarize(message):
+    """Returns a human-readable, tab-indented summary of a GCS
+    notification message received from Pub/Sub.
+
+    Reads the eventType, bucketId, objectId, objectGeneration and
+    payloadFormat attributes; for JSON_API_V1 payloads the object
+    metadata (size, contentType, metageneration) in the message body
+    is appended to the summary.
+    """
+    data = message.data.decode("utf-8")
+    attributes = message.attributes
+
+    event_type = attributes["eventType"]
+    bucket_id = attributes["bucketId"]
+    object_id = attributes["objectId"]
+    generation = attributes["objectGeneration"]
+    description = (
+        "\tEvent type: {event_type}\n"
+        "\tBucket ID: {bucket_id}\n"
+        "\tObject ID: {object_id}\n"
+        "\tGeneration: {generation}\n"
+    ).format(
+        event_type=event_type,
+        bucket_id=bucket_id,
+        object_id=object_id,
+        generation=generation,
+    )
+
+    # Overwrite attributes are only present for overwrite events.
+    if "overwroteGeneration" in attributes:
+        description += f"\tOverwrote generation: {attributes['overwroteGeneration']}\n"
+    if "overwrittenByGeneration" in attributes:
+        description += f"\tOverwritten by generation: {attributes['overwrittenByGeneration']}\n"
+
+    payload_format = attributes["payloadFormat"]
+    if payload_format == "JSON_API_V1":
+        object_metadata = json.loads(data)
+        size = object_metadata["size"]
+        content_type = object_metadata["contentType"]
+        metageneration = object_metadata["metageneration"]
+        description += (
+            "\tContent type: {content_type}\n"
+            "\tSize: {object_size}\n"
+            "\tMetageneration: {metageneration}\n"
+        ).format(
+            content_type=content_type,
+            object_size=size,
+            metageneration=metageneration,
+        )
+    return description
+
+
+def poll_notifications(project, subscription_name):
+    """Polls a Cloud Pub/Sub subscription for new GCS events for display."""
+    subscriber = pubsub_v1.SubscriberClient()
+    subscription_path = subscriber.subscription_path(
+        project, subscription_name
+    )
+
+    def callback(message):
+        # Print the parsed event, then ack so it is not redelivered.
+        print(f"Received message:\n{summarize(message)}")
+        message.ack()
+
+    subscriber.subscribe(subscription_path, callback=callback)
+
+    # The subscriber is non-blocking, so we must keep the main thread from
+    # exiting to allow it to process messages in the background.
+    print(f"Listening for messages on {subscription_path}")
+    while True:
+        time.sleep(60)
+
+
+if __name__ == "__main__":
+    # Parse the project/subscription IDs from the command line and poll
+    # indefinitely; the module docstring doubles as the --help text.
+    parser = argparse.ArgumentParser(
+        description=__doc__,
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+    )
+    parser.add_argument(
+        "project", help="The ID of the project that owns the subscription"
+    )
+    parser.add_argument(
+        "subscription", help="The ID of the Pub/Sub subscription"
+    )
+    args = parser.parse_args()
+    poll_notifications(args.project, args.subscription)
diff --git a/storage/samples/snippets/notification_polling_test.py b/storage/samples/snippets/notification_polling_test.py
new file mode 100644
index 00000000000..dfb241b842d
--- /dev/null
+++ b/storage/samples/snippets/notification_polling_test.py
@@ -0,0 +1,55 @@
+# Copyright 2017 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from google.cloud.pubsub_v1.subscriber.message import Message
+import mock
+
+from notification_polling import summarize
+
+
+MESSAGE_ID = 12345
+
+
+def test_parse_json_message():
+    """Builds a synthetic JSON_API_V1 Pub/Sub message and checks that
+    summarize() renders both the attributes and the JSON payload."""
+    attributes = {
+        "eventType": "OBJECT_FINALIZE",
+        "bucketId": "mybucket",
+        "objectId": "myobject",
+        "objectGeneration": 1234567,
+        "resource": "projects/_/buckets/mybucket/objects/myobject#1234567",
+        "notificationConfig": (
+            "projects/_/buckets/mybucket/" "notificationConfigs/5"
+        ),
+        "payloadFormat": "JSON_API_V1",
+    }
+    data = (
+        b"{"
+        b' "size": 12345,'
+        b' "contentType": "text/html",'
+        b' "metageneration": 1'
+        b"}"
+    )
+    # Wrap a mock protobuf in the real Message class so .data/.attributes
+    # behave as they would for a delivered message.
+    message = Message(
+        mock.Mock(data=data, attributes=attributes, publish_time=mock.Mock(seconds=0.0, nanos=0.0)), MESSAGE_ID, delivery_attempt=0, request_queue=mock.Mock()
+    )
+    assert summarize(message) == (
+        "\tEvent type: OBJECT_FINALIZE\n"
+        "\tBucket ID: mybucket\n"
+        "\tObject ID: myobject\n"
+        "\tGeneration: 1234567\n"
+        "\tContent type: text/html\n"
+        "\tSize: 12345\n"
+        "\tMetageneration: 1\n"
+    )
diff --git a/storage/samples/snippets/notification_test.py b/storage/samples/snippets/notification_test.py
new file mode 100644
index 00000000000..a2fdbe3ef39
--- /dev/null
+++ b/storage/samples/snippets/notification_test.py
@@ -0,0 +1,120 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import uuid
+
+from google.api_core.exceptions import NotFound
+from google.cloud import storage
+
+import pytest
+
+import storage_create_bucket_notifications
+import storage_delete_bucket_notification
+import storage_list_bucket_notifications
+import storage_print_pubsub_bucket_notification
+
+_topic_name = f"notification-{uuid.uuid4()}"
+
+
+@pytest.fixture(scope="module")
+def storage_client():
+ return storage.Client()
+
+
+@pytest.fixture(scope="module")
+def publisher_client():
+ try:
+ from google.cloud.pubsub_v1 import PublisherClient
+ except ImportError:
+ pytest.skip("Cannot import pubsub")
+
+ return PublisherClient()
+
+
+@pytest.fixture(scope="module")
+def _notification_topic(storage_client, publisher_client):
+ topic_path = publisher_client.topic_path(storage_client.project, _topic_name)
+ try:
+ topic = publisher_client.get_topic(request={"topic": topic_path})
+ except NotFound:
+ topic = publisher_client.create_topic(request={"name": topic_path})
+
+ policy = publisher_client.get_iam_policy(request={"resource": topic_path})
+ binding = policy.bindings.add()
+ binding.role = "roles/pubsub.publisher"
+ binding.members.append(
+ f"serviceAccount:{storage_client.get_service_account_email()}"
+ )
+ publisher_client.set_iam_policy(request={"resource": topic_path, "policy": policy})
+
+ yield topic
+
+ try:
+ publisher_client.delete_topic(request={"topic": topic.name})
+ except NotFound:
+ pass
+
+
+@pytest.fixture(scope="module")
+def bucket_w_notification(storage_client, _notification_topic):
+ """Yields a bucket with notification that is deleted after the tests complete."""
+ bucket = None
+ while bucket is None or bucket.exists():
+ bucket_name = f"notification-test-{uuid.uuid4()}"
+ bucket = storage_client.bucket(bucket_name)
+ bucket.create()
+
+ notification = bucket.notification(topic_name=_topic_name)
+ notification.create()
+
+ yield bucket
+
+ bucket.delete(force=True)
+
+
+def test_list_bucket_notifications(bucket_w_notification, capsys):
+ storage_list_bucket_notifications.list_bucket_notifications(bucket_w_notification.name)
+ out, _ = capsys.readouterr()
+ assert "Notification ID" in out
+
+
+def test_print_pubsub_bucket_notification(bucket_w_notification, capsys):
+ notification_id = 1
+ storage_print_pubsub_bucket_notification.print_pubsub_bucket_notification(bucket_w_notification.name, notification_id)
+ out, _ = capsys.readouterr()
+ assert "Notification ID: 1" in out
+
+
+def test_create_bucket_notifications(bucket_w_notification, capsys):
+    # verify that only bucket notification ID 1 was created in the fixture
+ assert bucket_w_notification.notification(notification_id=1).exists() is True
+ assert bucket_w_notification.notification(notification_id=2).exists() is False
+
+ storage_create_bucket_notifications.create_bucket_notifications(bucket_w_notification.name, _topic_name)
+ out, _ = capsys.readouterr()
+ assert "Successfully created notification" in out
+    # test successfully creates new bucket notification with ID 2
+ assert bucket_w_notification.notification(notification_id=2).exists() is True
+
+
+def test_delete_bucket_notification(bucket_w_notification, capsys):
+    # verify that bucket notification ID 1 was created in the fixture
+ notification_id = 1
+ assert bucket_w_notification.notification(notification_id=notification_id).exists() is True
+
+ storage_delete_bucket_notification.delete_bucket_notification(bucket_w_notification.name, notification_id)
+ out, _ = capsys.readouterr()
+ assert "Successfully deleted notification" in out
+ assert bucket_w_notification.notification(notification_id=notification_id).exists() is False
diff --git a/storage/samples/snippets/noxfile.py b/storage/samples/snippets/noxfile.py
new file mode 100644
index 00000000000..69bcaf56de6
--- /dev/null
+++ b/storage/samples/snippets/noxfile.py
@@ -0,0 +1,292 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import glob
+import os
+from pathlib import Path
+import sys
+from typing import Callable, Dict, Optional
+
+import nox
+
+
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# DO NOT EDIT THIS FILE EVER!
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# WARNING - WARNING - WARNING - WARNING - WARNING
+
+BLACK_VERSION = "black==22.3.0"
+ISORT_VERSION = "isort==5.10.1"
+
+# Copy `noxfile_config.py` to your directory and modify it instead.
+
+# `TEST_CONFIG` dict is a configuration hook that allows users to
+# modify the test configurations. The values here should be in sync
+# with `noxfile_config.py`. Users will copy `noxfile_config.py` into
+# their directory and modify it.
+
+TEST_CONFIG = {
+ # You can opt out from the test for specific Python versions.
+ "ignored_versions": [],
+ # Old samples are opted out of enforcing Python type hints
+ # All new samples should feature them
+ "enforce_type_hints": False,
+ # An envvar key for determining the project id to use. Change it
+ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
+ # build specific Cloud project. You can also use your own string
+ # to use your own Cloud project.
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
+ # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
+ # A dictionary you want to inject into your test. Don't put any
+ # secrets here. These values will override predefined values.
+ "envs": {},
+}
+
+
+try:
+ # Ensure we can import noxfile_config in the project's directory.
+ sys.path.append(".")
+ from noxfile_config import TEST_CONFIG_OVERRIDE
+except ImportError as e:
+ print("No user noxfile_config found: detail: {}".format(e))
+ TEST_CONFIG_OVERRIDE = {}
+
+# Update the TEST_CONFIG with the user supplied values.
+TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
+
+
+def get_pytest_env_vars() -> Dict[str, str]:
+ """Returns a dict for pytest invocation."""
+ ret = {}
+
+ # Override the GCLOUD_PROJECT and the alias.
+ env_key = TEST_CONFIG["gcloud_project_env"]
+ # This should error out if not set.
+ ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
+
+ # Apply user supplied envs.
+ ret.update(TEST_CONFIG["envs"])
+ return ret
+
+
+# DO NOT EDIT - automatically generated.
+# All versions used to test samples.
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13", "3.14"]
+
+# Any default versions that should be ignored.
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
+
+TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
+
+INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in (
+ "True",
+ "true",
+)
+
+# Error if a python version is missing
+nox.options.error_on_missing_interpreters = True
+
+#
+# Style Checks
+#
+
+
+# Linting with flake8.
+#
+# We ignore the following rules:
+# E203: whitespace before ‘:’
+# E266: too many leading ‘#’ for block comment
+# E501: line too long
+# I202: Additional newline in a section of imports
+#
+# We also need to specify the rules which are ignored by default:
+# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121']
+FLAKE8_COMMON_ARGS = [
+ "--show-source",
+ "--builtin=gettext",
+ "--max-complexity=20",
+ "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py",
+ "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202",
+ "--max-line-length=88",
+]
+
+
+@nox.session
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG["enforce_type_hints"]:
+ session.install("flake8")
+ else:
+ session.install("flake8", "flake8-annotations")
+
+ args = FLAKE8_COMMON_ARGS + [
+ ".",
+ ]
+ session.run("flake8", *args)
+
+
+#
+# Black
+#
+
+
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ """Run black. Format code to uniform standard."""
+ session.install(BLACK_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+
+ session.run("black", *python_files)
+
+
+#
+# format = isort + black
+#
+
+@nox.session
+def format(session: nox.sessions.Session) -> None:
+ """
+ Run isort to sort imports. Then run black
+ to format code to uniform standard.
+ """
+ session.install(BLACK_VERSION, ISORT_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+
+ # Use the --fss option to sort imports using strict alphabetical order.
+ # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections
+ session.run("isort", "--fss", *python_files)
+ session.run("black", *python_files)
+
+
+#
+# Sample Tests
+#
+
+
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+
+
+def _session_tests(
+ session: nox.sessions.Session, post_install: Callable = None
+) -> None:
+ # check for presence of tests
+ test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True)
+ test_list.extend(glob.glob("**/tests", recursive=True))
+
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ return
+
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ concurrent_args = []
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+ with open("requirements.txt") as rfile:
+ packages = rfile.read()
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install(
+ "-r", "requirements-test.txt", "-c", "constraints-test.txt"
+ )
+ else:
+ session.install("-r", "requirements-test.txt")
+ with open("requirements-test.txt") as rtfile:
+ packages += rtfile.read()
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ if "pytest-parallel" in packages:
+ concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto'])
+ elif "pytest-xdist" in packages:
+ concurrent_args.extend(['-n', 'auto'])
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args),
+ # Pytest will return 5 when no tests are collected. This can happen
+ # on travis where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
+
+
+@nox.session(python=ALL_VERSIONS)
+def py(session: nox.sessions.Session) -> None:
+ """Runs py.test for a sample using the specified version of Python."""
+ if session.python in TESTED_VERSIONS:
+ _session_tests(session)
+ else:
+ session.skip(
+ "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+ )
+
+
+#
+# Readmegen
+#
+
+
+def _get_repo_root() -> Optional[str]:
+ """ Returns the root folder of the project. """
+ # Get root of this repository. Assume we don't have directories nested deeper than 10 items.
+ p = Path(os.getcwd())
+ for i in range(10):
+ if p is None:
+ break
+ if Path(p / ".git").exists():
+ return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
+ p = p.parent
+ raise Exception("Unable to detect repository root.")
+
+
+GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")])
+
+
+@nox.session
+@nox.parametrize("path", GENERATED_READMES)
+def readmegen(session: nox.sessions.Session, path: str) -> None:
+ """(Re-)generates the readme for a sample."""
+ session.install("jinja2", "pyyaml")
+ dir_ = os.path.dirname(path)
+
+ if os.path.exists(os.path.join(dir_, "requirements.txt")):
+ session.install("-r", os.path.join(dir_, "requirements.txt"))
+
+ in_file = os.path.join(dir_, "README.rst.in")
+ session.run(
+ "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file
+ )
diff --git a/storage/samples/snippets/noxfile_config.py b/storage/samples/snippets/noxfile_config.py
new file mode 100644
index 00000000000..7eba203a4b4
--- /dev/null
+++ b/storage/samples/snippets/noxfile_config.py
@@ -0,0 +1,107 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Default TEST_CONFIG_OVERRIDE for python repos.
+
+# You can copy this file into your directory, then it will be imported from
+# the noxfile.py.
+
+# The source of truth:
+# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/noxfile_config.py
+
+import os
+
+
+# We are reaching maximum number of HMAC keys on the service account.
+# We change the service account based on the value of
+# RUN_TESTS_SESSION. The reason we cannot use multiple projects is
+# that our new projects are enforced to have
+# 'constraints/iam.disableServiceAccountKeyCreation' policy.
+def get_service_account_email():
+ session = os.environ.get('RUN_TESTS_SESSION')
+ if session == 'py-3.6':
+ return ('py36-storage-test@'
+ 'python-docs-samples-tests.iam.gserviceaccount.com')
+ if session == 'py-3.7':
+ return ('py37-storage-test@'
+ 'python-docs-samples-tests.iam.gserviceaccount.com')
+ if session == 'py-3.8':
+ return ('py38-storage-test@'
+ 'python-docs-samples-tests.iam.gserviceaccount.com')
+ if session == 'py-3.9':
+ return ('py39-storage-test@'
+ 'python-docs-samples-tests.iam.gserviceaccount.com')
+ if session == 'py-3.10':
+ return ('py310-storage-test@'
+ 'python-docs-samples-tests.iam.gserviceaccount.com')
+ return os.environ['HMAC_KEY_TEST_SERVICE_ACCOUNT']
+
+
+# We change the value of CLOUD_KMS_KEY based on the value of
+# RUN_TESTS_SESSION.
+def get_cloud_kms_key():
+ session = os.environ.get('RUN_TESTS_SESSION')
+ if session == 'py-3.6':
+ return ('projects/python-docs-samples-tests-py36/locations/us/'
+ 'keyRings/gcs-kms-key-ring/cryptoKeys/gcs-kms-key')
+ if session == 'py-3.7':
+ return ('projects/python-docs-samples-tests-py37/locations/us/'
+ 'keyRings/gcs-kms-key-ring/cryptoKeys/gcs-kms-key')
+ if session == 'py-3.8':
+ return ('projects/python-docs-samples-tests-py38/locations/us/'
+ 'keyRings/gcs-kms-key-ring/cryptoKeys/gcs-kms-key')
+ if session == 'py-3.9':
+ return ('projects/python-docs-samples-tests-py39/locations/us/'
+ 'keyRings/gcs-kms-key-ring/cryptoKeys/gcs-kms-key')
+ if session == 'py-3.10':
+ return ('projects/python-docs-samples-tests-310/locations/us/'
+ 'keyRings/gcs-kms-key-ring/cryptoKeys/gcs-kms-key')
+ if session == 'py-3.11':
+ return ('projects/python-docs-samples-tests-311/locations/us/'
+ 'keyRings/gcs-kms-key-ring/cryptoKeys/gcs-kms-key')
+ if session == 'py-3.12':
+ return ('projects/python-docs-samples-tests-312/locations/us/'
+ 'keyRings/gcs-kms-key-ring/cryptoKeys/gcs-kms-key')
+ if session == 'py-3.13':
+ return ('projects/python-docs-samples-tests-313/locations/us/'
+ 'keyRings/gcs-kms-key-ring/cryptoKeys/gcs-kms-key')
+ return os.environ['CLOUD_KMS_KEY']
+
+
+TEST_CONFIG_OVERRIDE = {
+ # You can opt out from the test for specific Python versions.
+ 'ignored_versions': ["2.7", "3.6", "3.7", "3.11", "3.12", "3.13"],
+
+ # An envvar key for determining the project id to use. Change it
+ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
+ # build specific Cloud project. You can also use your own string
+ # to use your own Cloud project.
+ # 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
+ 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
+
+ # A dictionary you want to inject into your test. Don't put any
+ # secrets here. These values will override predefined values.
+ 'envs': {
+ 'HMAC_KEY_TEST_SERVICE_ACCOUNT': get_service_account_email(),
+ 'CLOUD_KMS_KEY': get_cloud_kms_key(),
+ # Some tests can not use multiple projects because of several reasons:
+    # 1. The new projects are enforced to have the
+ # 'constraints/iam.disableServiceAccountKeyCreation' policy.
+ # 2. The new projects buckets need to have universal permission model.
+ # For those tests, we'll use the original project.
+ 'MAIN_GOOGLE_CLOUD_PROJECT': 'python-docs-samples-tests',
+ 'MAIN_CLOUD_KMS_KEY': ('projects/python-docs-samples-tests/locations/us/'
+ 'keyRings/gcs-kms-key-ring/cryptoKeys/gcs-kms-key')
+ },
+}
diff --git a/storage/samples/snippets/public_access_prevention_test.py b/storage/samples/snippets/public_access_prevention_test.py
new file mode 100644
index 00000000000..558a4ef1575
--- /dev/null
+++ b/storage/samples/snippets/public_access_prevention_test.py
@@ -0,0 +1,39 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import storage_get_public_access_prevention
+import storage_set_public_access_prevention_enforced
+import storage_set_public_access_prevention_inherited
+
+
+def test_get_public_access_prevention(bucket, capsys):
+ short_name = storage_get_public_access_prevention
+ short_name.get_public_access_prevention(bucket.name)
+ out, _ = capsys.readouterr()
+ assert f"Public access prevention is inherited for {bucket.name}." in out
+
+
+def test_set_public_access_prevention_enforced(bucket, capsys):
+ short_name = storage_set_public_access_prevention_enforced
+ short_name.set_public_access_prevention_enforced(bucket.name)
+ out, _ = capsys.readouterr()
+ assert f"Public access prevention is set to enforced for {bucket.name}." in out
+
+
+def test_set_public_access_prevention_inherited(bucket, capsys):
+ short_name = storage_set_public_access_prevention_inherited
+ short_name.set_public_access_prevention_inherited(bucket.name)
+ out, _ = capsys.readouterr()
+ assert f"Public access prevention is 'inherited' for {bucket.name}." in out
diff --git a/storage/samples/snippets/quickstart.py b/storage/samples/snippets/quickstart.py
new file mode 100644
index 00000000000..54148b1fb55
--- /dev/null
+++ b/storage/samples/snippets/quickstart.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def run_quickstart():
+ # [START storage_quickstart]
+ # Imports the Google Cloud client library
+ from google.cloud import storage
+
+ # Instantiates a client
+ storage_client = storage.Client()
+
+ # The name for the new bucket
+ bucket_name = "my-new-bucket"
+
+ # Creates the new bucket
+ bucket = storage_client.create_bucket(bucket_name)
+
+ print(f"Bucket {bucket.name} created.")
+ # [END storage_quickstart]
+
+
+if __name__ == "__main__":
+ run_quickstart()
diff --git a/storage/samples/snippets/quickstart_test.py b/storage/samples/snippets/quickstart_test.py
new file mode 100644
index 00000000000..f6e06ad93e8
--- /dev/null
+++ b/storage/samples/snippets/quickstart_test.py
@@ -0,0 +1,28 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+
+import quickstart
+
+
+@mock.patch("google.cloud.storage.client.Client.create_bucket")
+def test_quickstart(create_bucket_mock, capsys):
+ # Unlike other quickstart tests, this one mocks out the creation
+    # because buckets are expensive, globally-namespaced objects.
+ create_bucket_mock.return_value = mock.sentinel.bucket
+
+ quickstart.run_quickstart()
+
+ create_bucket_mock.assert_called_with("my-new-bucket")
diff --git a/storage/samples/snippets/requester_pays_test.py b/storage/samples/snippets/requester_pays_test.py
new file mode 100644
index 00000000000..4bef0cb8968
--- /dev/null
+++ b/storage/samples/snippets/requester_pays_test.py
@@ -0,0 +1,73 @@
+# Copyright 2017 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import backoff
+import os
+import tempfile
+
+from google.api_core.exceptions import GoogleAPIError
+from google.cloud import storage
+import pytest
+
+import storage_disable_requester_pays
+import storage_download_file_requester_pays
+import storage_enable_requester_pays
+import storage_get_requester_pays_status
+
+
+# We use a different bucket from other tests.
+# The service account for the test needs to have Billing Project Manager role
+# in order to make changes on buckets with requester pays enabled.
+BUCKET = os.environ["REQUESTER_PAYS_TEST_BUCKET"]
+PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
+
+
+@backoff.on_exception(backoff.expo, GoogleAPIError, max_time=60)
+def test_enable_requester_pays(capsys):
+ storage_enable_requester_pays.enable_requester_pays(BUCKET)
+ out, _ = capsys.readouterr()
+ assert f"Requester Pays has been enabled for {BUCKET}" in out
+
+
+@backoff.on_exception(backoff.expo, GoogleAPIError, max_time=60)
+def test_disable_requester_pays(capsys):
+ storage_disable_requester_pays.disable_requester_pays(BUCKET)
+ out, _ = capsys.readouterr()
+ assert f"Requester Pays has been disabled for {BUCKET}" in out
+
+
+@backoff.on_exception(backoff.expo, GoogleAPIError, max_time=60)
+def test_get_requester_pays_status(capsys):
+ storage_get_requester_pays_status.get_requester_pays_status(BUCKET)
+ out, _ = capsys.readouterr()
+ assert f"Requester Pays is disabled for {BUCKET}" in out
+
+
+@pytest.fixture
+def test_blob():
+ """Provides a pre-existing blob in the test bucket."""
+ bucket = storage.Client().bucket(BUCKET)
+ blob = bucket.blob("storage_snippets_test_sigil")
+ blob.upload_from_string("Hello, is it me you're looking for?")
+ return blob
+
+
+@backoff.on_exception(backoff.expo, GoogleAPIError, max_time=60)
+def test_download_file_requester_pays(test_blob, capsys):
+ with tempfile.NamedTemporaryFile() as dest_file:
+ storage_download_file_requester_pays.download_file_requester_pays(
+ BUCKET, PROJECT, test_blob.name, dest_file.name
+ )
+
+ assert dest_file.read()
diff --git a/storage/samples/snippets/requirements-test.txt b/storage/samples/snippets/requirements-test.txt
new file mode 100644
index 00000000000..5644295d03e
--- /dev/null
+++ b/storage/samples/snippets/requirements-test.txt
@@ -0,0 +1,4 @@
+pytest===7.4.4; python_version == '3.7'
+pytest==8.3.5; python_version >= '3.8'
+mock==5.2.0
+backoff==2.2.1
diff --git a/storage/samples/snippets/requirements.txt b/storage/samples/snippets/requirements.txt
new file mode 100644
index 00000000000..751f8cfbe53
--- /dev/null
+++ b/storage/samples/snippets/requirements.txt
@@ -0,0 +1,8 @@
+google-cloud-pubsub==2.29.0
+google-cloud-storage==3.1.0
+pandas===1.3.5; python_version == '3.7'
+pandas===2.0.3; python_version == '3.8'
+pandas==2.2.3; python_version >= '3.9'
+opentelemetry-exporter-gcp-trace
+opentelemetry-propagator-gcp
+opentelemetry-instrumentation-requests
diff --git a/storage/samples/snippets/rpo_test.py b/storage/samples/snippets/rpo_test.py
new file mode 100644
index 00000000000..0dcf1574646
--- /dev/null
+++ b/storage/samples/snippets/rpo_test.py
@@ -0,0 +1,61 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import uuid
+
+from google.cloud import storage
+import pytest
+
+import storage_create_bucket_turbo_replication
+import storage_get_rpo
+import storage_set_rpo_async_turbo
+import storage_set_rpo_default
+
+
+@pytest.fixture
+def dual_region_bucket():
+ """Yields a dual region bucket that is deleted after the test completes."""
+ bucket = None
+ location = "NAM4"
+ while bucket is None or bucket.exists():
+ bucket_name = f"bucket-lock-{uuid.uuid4()}"
+ bucket = storage.Client().bucket(bucket_name)
+ bucket.create(location=location)
+ yield bucket
+ bucket.delete(force=True)
+
+
+def test_get_rpo(dual_region_bucket, capsys):
+ storage_get_rpo.get_rpo(dual_region_bucket.name)
+ out, _ = capsys.readouterr()
+ assert f"RPO for {dual_region_bucket.name} is DEFAULT." in out
+
+
+def test_set_rpo_async_turbo(dual_region_bucket, capsys):
+ storage_set_rpo_async_turbo.set_rpo_async_turbo(dual_region_bucket.name)
+ out, _ = capsys.readouterr()
+ assert f"RPO is set to ASYNC_TURBO for {dual_region_bucket.name}." in out
+
+
+def test_set_rpo_default(dual_region_bucket, capsys):
+ storage_set_rpo_default.set_rpo_default(dual_region_bucket.name)
+ out, _ = capsys.readouterr()
+ assert f"RPO is set to DEFAULT for {dual_region_bucket.name}." in out
+
+
+def test_create_bucket_turbo_replication(capsys):
+ bucket_name = f"test-rpo-{uuid.uuid4()}"
+ storage_create_bucket_turbo_replication.create_bucket_turbo_replication(bucket_name)
+ out, _ = capsys.readouterr()
+ assert f"{bucket_name} created with the recovery point objective (RPO) set to ASYNC_TURBO in NAM4." in out
diff --git a/storage/samples/snippets/snippets_test.py b/storage/samples/snippets/snippets_test.py
new file mode 100644
index 00000000000..1d3c8c1c442
--- /dev/null
+++ b/storage/samples/snippets/snippets_test.py
@@ -0,0 +1,1065 @@
+# Copyright 2016 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import asyncio
+import io
+import os
+import tempfile
+import time
+import uuid
+import sys
+
+from google.cloud import storage
+import google.cloud.exceptions
+import pytest
+import requests
+
+import storage_add_bucket_label
+import storage_async_download
+import storage_async_upload
+import storage_batch_request
+import storage_bucket_delete_default_kms_key
+import storage_change_default_storage_class
+import storage_change_file_storage_class
+import storage_compose_file
+import storage_configure_retries
+import storage_copy_file
+import storage_copy_file_archived_generation
+import storage_cors_configuration
+import storage_create_bucket_class_location
+import storage_create_bucket_dual_region
+import storage_create_bucket_hierarchical_namespace
+import storage_create_bucket_object_retention
+import storage_define_bucket_website_configuration
+import storage_delete_file
+import storage_delete_file_archived_generation
+import storage_disable_bucket_lifecycle_management
+import storage_disable_soft_delete
+import storage_disable_versioning
+import storage_download_byte_range
+import storage_download_file
+import storage_download_into_memory
+import storage_download_public_file
+import storage_download_to_stream
+import storage_enable_bucket_lifecycle_management
+import storage_enable_versioning
+import storage_generate_signed_post_policy_v4
+import storage_generate_signed_url_v2
+import storage_generate_signed_url_v4
+import storage_generate_upload_signed_url_v4
+import storage_get_autoclass
+import storage_get_bucket_labels
+import storage_get_bucket_metadata
+import storage_get_metadata
+import storage_get_service_account
+import storage_get_soft_delete_policy
+import storage_get_soft_deleted_bucket
+import storage_list_buckets
+import storage_list_file_archived_generations
+import storage_list_files
+import storage_list_files_with_prefix
+import storage_list_soft_deleted_buckets
+import storage_list_soft_deleted_object_versions
+import storage_list_soft_deleted_objects
+import storage_make_public
+import storage_move_file
+import storage_move_file_atomically
+import storage_object_get_kms_key
+import storage_remove_bucket_label
+import storage_remove_cors_configuration
+import storage_rename_file
+import storage_restore_object
+import storage_restore_soft_deleted_bucket
+import storage_set_autoclass
+import storage_set_bucket_default_kms_key
+import storage_set_client_endpoint
+import storage_set_metadata
+import storage_set_object_retention_policy
+import storage_set_soft_delete_policy
+import storage_trace_quickstart
+import storage_transfer_manager_download_bucket
+import storage_transfer_manager_download_chunks_concurrently
+import storage_transfer_manager_download_many
+import storage_transfer_manager_upload_chunks_concurrently
+import storage_transfer_manager_upload_directory
+import storage_transfer_manager_upload_many
+import storage_upload_file
+import storage_upload_from_memory
+import storage_upload_from_stream
+import storage_upload_with_kms_key
+
# Fully-qualified Cloud KMS key name used by the KMS-related tests below;
# supplied by the CI environment (None when unset).
KMS_KEY = os.environ.get("CLOUD_KMS_KEY")
# True when running under Python 3.14; several tests are skipped there (b/470276398).
IS_PYTHON_3_14 = sys.version_info[:2] == (3, 14)
+
+
@pytest.mark.skipif(IS_PYTHON_3_14, reason="b/470276398")
def test_enable_default_kms_key(test_bucket):
    """Setting a default KMS key on the bucket takes effect server-side."""
    storage_set_bucket_default_kms_key.enable_default_kms_key(
        bucket_name=test_bucket.name, kms_key_name=KMS_KEY
    )
    time.sleep(2)  # Let change propagate as needed
    refreshed = storage.Client().get_bucket(test_bucket.name)
    assert refreshed.default_kms_key_name.startswith(KMS_KEY)
    # Clear the key so later tests see the shared bucket in its default state.
    refreshed.default_kms_key_name = None
    refreshed.patch()
+
+
def test_get_bucket_labels(test_bucket):
    """The label-listing sample runs without raising for an existing bucket."""
    storage_get_bucket_labels.get_bucket_labels(test_bucket.name)
+
+
def test_add_bucket_label(test_bucket, capsys):
    """Adding the sample label prints the updated label set."""
    storage_add_bucket_label.add_bucket_label(test_bucket.name)
    captured = capsys.readouterr()
    assert "example" in captured.out
+
+
def test_remove_bucket_label(test_bucket, capsys):
    """A label added by the add sample can be removed again."""
    storage_add_bucket_label.add_bucket_label(test_bucket.name)
    storage_remove_bucket_label.remove_bucket_label(test_bucket.name)
    captured = capsys.readouterr()
    assert "Removed labels" in captured.out
+
+
@pytest.fixture(scope="module")
def test_bucket():
    """Provide a module-scoped bucket; force-delete it (and contents) at teardown."""
    # Regenerate the candidate name until we find one that is not taken.
    while True:
        candidate = storage.Client().bucket(f"storage-snippets-test-{uuid.uuid4()}")
        if not candidate.exists():
            break
    candidate.create()
    yield candidate
    candidate.delete(force=True)
+
+
@pytest.fixture(scope="module")
def test_soft_deleted_bucket():
    """Provide a bucket that has already been deleted (and is thus soft-deleted)."""
    # Regenerate the candidate name until we find one that is not taken.
    while True:
        candidate = storage.Client().bucket(f"storage-snippets-test-{uuid.uuid4()}")
        if not candidate.exists():
            break
    candidate.create()
    # Assumes the bucket is created with the default policy, i.e. soft delete on.
    candidate.delete()
    yield candidate
+
+
@pytest.fixture(scope="function")
def test_soft_delete_enabled_bucket():
    """Provide a bucket with a 7-day soft-delete policy; force-delete at teardown."""
    # Regenerate the candidate name until we find one that is not taken.
    while True:
        candidate = storage.Client().bucket(f"storage-snippets-test-{uuid.uuid4()}")
        if not candidate.exists():
            break
    # Soft-delete retention for 7 days (minimum allowed by API).
    candidate.soft_delete_policy.retention_duration_seconds = 7 * 24 * 60 * 60
    # Soft-delete requires a region.
    candidate.create(location="US-CENTRAL1")
    yield candidate
    candidate.delete(force=True)
+
+
@pytest.fixture(scope="function")
def test_public_bucket():
    """Yields a bucket in the legacy main project, deleted after the test.

    New projects don't allow making a bucket available to the public, so
    tests that need public access temporarily switch GOOGLE_CLOUD_PROJECT
    to the old main project.
    """
    original_value = os.environ["GOOGLE_CLOUD_PROJECT"]
    os.environ["GOOGLE_CLOUD_PROJECT"] = os.environ["MAIN_GOOGLE_CLOUD_PROJECT"]
    try:
        bucket = None
        while bucket is None or bucket.exists():
            storage_client = storage.Client()
            bucket_name = f"storage-snippets-test-{uuid.uuid4()}"
            bucket = storage_client.bucket(bucket_name)
        storage_client.create_bucket(bucket)
        yield bucket
        bucket.delete(force=True)
    finally:
        # Fixed: restore the project even when setup or the test raises;
        # otherwise every later test silently runs against the wrong project.
        os.environ["GOOGLE_CLOUD_PROJECT"] = original_value
+
+
@pytest.fixture(scope="module")
def new_bucket_obj():
    """Provide an un-created bucket object with a fresh name; force-delete at teardown."""
    # Regenerate the candidate name until we find one that is not taken.
    while True:
        candidate = storage.Client().bucket(f"storage-snippets-test-{uuid.uuid4()}")
        if not candidate.exists():
            break
    yield candidate
    candidate.delete(force=True)
+
+
@pytest.fixture
def test_blob(test_bucket):
    """Yields a small text blob uploaded into the shared test bucket.

    The blob is not deleted here; it is removed when the module-scoped
    bucket fixture force-deletes the bucket at teardown.
    """
    bucket = test_bucket
    blob = bucket.blob(f"storage_snippets_test_sigil-{uuid.uuid4()}")
    blob.upload_from_string("Hello, is it me you're looking for?")
    yield blob
+
+
@pytest.fixture(scope="function")
def test_public_blob(test_public_bucket):
    """Yields a small text blob in the public-capable bucket.

    The blob is not deleted here; the test_public_bucket fixture
    force-deletes the bucket (and its contents) at teardown.
    """
    bucket = test_public_bucket
    blob = bucket.blob(f"storage_snippets_test_sigil-{uuid.uuid4()}")
    blob.upload_from_string("Hello, is it me you're looking for?")
    yield blob
+
+
@pytest.fixture
def test_bucket_create():
    """Provide an un-created bucket object for create-style samples; force-delete at teardown."""
    # Regenerate the candidate name until we find one that is not taken.
    while True:
        candidate = storage.Client().bucket(f"storage-snippets-test-{uuid.uuid4()}")
        if not candidate.exists():
            break
    yield candidate
    candidate.delete(force=True)
+
+
def test_list_buckets(test_bucket, capsys):
    """Listing the project's buckets includes the fixture bucket."""
    storage_list_buckets.list_buckets()
    captured = capsys.readouterr()
    assert test_bucket.name in captured.out
+
+
def test_list_soft_deleted_buckets(test_soft_deleted_bucket, capsys):
    """The soft-deleted bucket listing includes the deleted fixture bucket."""
    storage_list_soft_deleted_buckets.list_soft_deleted_buckets()
    captured = capsys.readouterr()
    assert test_soft_deleted_bucket.name in captured.out
+
+
def test_list_blobs(test_blob, capsys):
    """Listing the bucket's objects prints the fixture blob's name."""
    storage_list_files.list_blobs(test_blob.bucket.name)
    captured = capsys.readouterr()
    assert test_blob.name in captured.out
+
+
def test_bucket_metadata(test_bucket, capsys):
    """Printing bucket metadata includes the bucket's name."""
    storage_get_bucket_metadata.bucket_metadata(test_bucket.name)
    captured = capsys.readouterr()
    assert test_bucket.name in captured.out
+
+
def test_get_soft_deleted_bucket(test_soft_deleted_bucket, capsys):
    """Fetching a soft-deleted bucket by name and generation prints its name."""
    name = test_soft_deleted_bucket.name
    generation = test_soft_deleted_bucket.generation
    storage_get_soft_deleted_bucket.get_soft_deleted_bucket(name, generation)
    captured = capsys.readouterr()
    assert name in captured.out
+
+
def test_restore_soft_deleted_bucket(test_soft_deleted_bucket, capsys):
    """Restoring a soft-deleted bucket prints its name."""
    name = test_soft_deleted_bucket.name
    generation = test_soft_deleted_bucket.generation
    storage_restore_soft_deleted_bucket.restore_bucket(name, generation)
    captured = capsys.readouterr()
    assert name in captured.out
+
+
def test_list_blobs_with_prefix(test_blob, capsys):
    """Prefix listing with 'storage_snippets' matches the fixture blob."""
    storage_list_files_with_prefix.list_blobs_with_prefix(
        test_blob.bucket.name, prefix="storage_snippets"
    )
    captured = capsys.readouterr()
    assert test_blob.name in captured.out
+
+
def test_upload_blob(test_bucket):
    """Uploading a small local file completes without raising."""
    with tempfile.NamedTemporaryFile() as source_file:
        source_file.write(b"test")
        source_file.flush()
        storage_upload_file.upload_blob(
            test_bucket.name, source_file.name, "test_upload_blob"
        )
+
+
def test_upload_blob_from_memory(test_bucket, capsys):
    """Uploading a string from memory echoes the uploaded contents."""
    storage_upload_from_memory.upload_blob_from_memory(
        test_bucket.name, "Hello, is it me you're looking for?", "test_upload_blob"
    )
    captured = capsys.readouterr()
    assert "Hello, is it me you're looking for?" in captured.out
+
+
def test_upload_blob_from_stream(test_bucket, capsys):
    """Uploading from a binary stream prints a confirmation."""
    stream = io.BytesIO()
    stream.write(b"This is test data.")
    storage_upload_from_stream.upload_blob_from_stream(
        test_bucket.name, stream, "test_upload_blob"
    )
    captured = capsys.readouterr()
    assert "Stream data uploaded to test_upload_blob" in captured.out
+
+
@pytest.mark.skipif(IS_PYTHON_3_14, reason="b/470276398")
def test_upload_blob_with_kms(test_bucket):
    """Uploading with a KMS key stores the object encrypted with that key."""
    object_name = f"test_upload_with_kms_{uuid.uuid4().hex}"
    with tempfile.NamedTemporaryFile() as source_file:
        source_file.write(b"test")
        source_file.flush()
        storage_upload_with_kms_key.upload_blob_with_kms(
            test_bucket.name,
            source_file.name,
            object_name,
            KMS_KEY,
        )
    fresh_bucket = storage.Client().bucket(test_bucket.name)
    kms_blob = fresh_bucket.get_blob(object_name)
    assert kms_blob.kms_key_name.startswith(KMS_KEY)
    # Clean up so the shared bucket stays tidy for other tests.
    test_bucket.delete_blob(object_name)
+
+
def test_async_upload(test_bucket, capsys):
    """The async upload sample uploads three files and reports the count.

    Fixed: the parameter was named ``bucket``, but no fixture of that name is
    defined in this module (all other tests use ``test_bucket``), so pytest
    could not resolve the fixture and the test errored at collection time.
    """
    asyncio.run(storage_async_upload.async_upload_blob(test_bucket.name))
    out, _ = capsys.readouterr()
    assert f"Uploaded 3 files to bucket {test_bucket.name}" in out
+
+
def test_async_download(test_bucket, capsys):
    """Concurrently downloading several blobs reports each one."""
    names = [f"async_sample_blob_{i}" for i in range(3)]
    for name in names:
        test_bucket.blob(name).upload_from_string(name)

    asyncio.run(
        storage_async_download.async_download_blobs(test_bucket.name, *names)
    )
    captured = capsys.readouterr()
    for name in names:
        assert f"Downloaded storage object {name}" in captured.out
+
+
def test_download_byte_range(test_blob):
    """Downloading bytes 0-4 of the fixture blob yields exactly b"Hello"."""
    with tempfile.NamedTemporaryFile() as dest_file:
        storage_download_byte_range.download_byte_range(
            test_blob.bucket.name, test_blob.name, 0, 4, dest_file.name
        )
        assert dest_file.read() == b"Hello"
+
+
def test_download_blob(test_blob):
    """Downloading the blob produces a non-empty local file."""
    with tempfile.NamedTemporaryFile() as dest_file:
        storage_download_file.download_blob(
            test_blob.bucket.name, test_blob.name, dest_file.name
        )
        assert dest_file.read()
+
+
def test_download_blob_into_memory(test_blob, capsys):
    """Downloading into memory prints the blob's contents."""
    storage_download_into_memory.download_blob_into_memory(
        test_blob.bucket.name, test_blob.name
    )
    captured = capsys.readouterr()
    assert "Hello, is it me you're looking for?" in captured.out
+
+
def test_download_blob_to_stream(test_blob, capsys):
    """Downloading to a stream writes the blob bytes and logs a message."""
    stream = io.BytesIO()
    storage_download_to_stream.download_blob_to_stream(
        test_blob.bucket.name, test_blob.name, stream
    )
    captured = capsys.readouterr()
    assert "Downloaded blob" in captured.out
    assert b"Hello, is it me you're looking for?" in stream.getvalue()
+
+
def test_blob_metadata(test_blob, capsys):
    """Printing object metadata includes the blob's name."""
    storage_get_metadata.blob_metadata(test_blob.bucket.name, test_blob.name)
    captured = capsys.readouterr()
    assert test_blob.name in captured.out
+
+
def test_set_blob_metadata(test_blob, capsys):
    """Setting object metadata prints the blob's name."""
    storage_set_metadata.set_blob_metadata(test_blob.bucket.name, test_blob.name)
    captured = capsys.readouterr()
    assert test_blob.name in captured.out
+
+
def test_delete_blob(test_blob):
    """Deleting the fixture blob completes without raising."""
    bucket_name = test_blob.bucket.name
    storage_delete_file.delete_blob(bucket_name, test_blob.name)
+
+
@pytest.mark.xfail(reason="wait until b/469643064 is fixed")
def test_make_blob_public(test_public_blob):
    """A blob made public is fetchable over unauthenticated HTTP."""
    storage_make_public.make_blob_public(
        test_public_blob.bucket.name, test_public_blob.name
    )
    response = requests.get(test_public_blob.public_url)
    assert response.text == "Hello, is it me you're looking for?"
+
+
def test_generate_signed_url(test_blob, capsys):
    """A v2 signed URL grants read access to the blob."""
    signed_url = storage_generate_signed_url_v2.generate_signed_url(
        test_blob.bucket.name, test_blob.name
    )
    response = requests.get(signed_url)
    assert response.text == "Hello, is it me you're looking for?"
+
+
def test_generate_download_signed_url_v4(test_blob, capsys):
    """A v4 signed download URL grants read access to the blob."""
    signed_url = storage_generate_signed_url_v4.generate_download_signed_url_v4(
        test_blob.bucket.name, test_blob.name
    )
    response = requests.get(signed_url)
    assert response.text == "Hello, is it me you're looking for?"
+
+
def test_generate_upload_signed_url_v4(test_bucket, capsys):
    """A v4 signed upload URL accepts a PUT whose bytes round-trip intact."""
    object_name = "storage_snippets_test_upload"
    payload = b"Uploaded via v4 signed url"
    signed_url = storage_generate_upload_signed_url_v4.generate_upload_signed_url_v4(
        test_bucket.name, object_name
    )

    requests.put(
        signed_url,
        data=payload,
        headers={"content-type": "application/octet-stream"},
    )

    uploaded = storage.Client().bucket(test_bucket.name).blob(object_name)
    assert uploaded.download_as_bytes() == payload
+
+
def test_generate_signed_policy_v4(test_bucket, capsys):
    """The generated POST policy form contains every required signed field."""
    object_name = "storage_snippets_test_form"
    form = storage_generate_signed_post_policy_v4.generate_signed_post_policy_v4(
        test_bucket.name, object_name
    )
    expected_fragments = (
        f"name='key' value='{object_name}'",
        "name='x-goog-signature'",
        "name='x-goog-date'",
        "name='x-goog-credential'",
        "name='x-goog-algorithm' value='GOOG4-RSA-SHA256'",
        "name='policy'",
        "name='x-goog-meta-test' value='data'",
        "type='file' name='file'/>",
    )
    for fragment in expected_fragments:
        assert fragment in form
+
+
def test_rename_blob(test_blob):
    """Renaming an object removes the old name and creates the new one."""
    bucket = storage.Client().bucket(test_blob.bucket.name)

    try:
        bucket.delete_blob("test_rename_blob")
    except google.cloud.exceptions.NotFound:
        # Fixed: was `google.cloud.exceptions.exceptions.NotFound` (doubled
        # `.exceptions`), which does not exist — when the blob was missing the
        # except clause itself raised AttributeError instead of catching NotFound.
        print(f"test_rename_blob not found in bucket {bucket.name}")

    storage_rename_file.rename_blob(bucket.name, test_blob.name, "test_rename_blob")

    assert bucket.get_blob("test_rename_blob") is not None
    assert bucket.get_blob(test_blob.name) is None
+
+
def test_move_blob(test_bucket_create, test_blob):
    """Moving an object copies it to the target bucket and removes the source."""
    source_bucket = test_blob.bucket
    storage.Client().create_bucket(test_bucket_create)

    # Remove any leftover target from a previous run.
    try:
        test_bucket_create.delete_blob("test_move_blob")
    except google.cloud.exceptions.NotFound:
        print(f"test_move_blob not found in bucket {test_bucket_create.name}")

    storage_move_file.move_blob(
        source_bucket.name,
        test_blob.name,
        test_bucket_create.name,
        "test_move_blob",
    )

    assert test_bucket_create.get_blob("test_move_blob") is not None
    assert source_bucket.get_blob(test_blob.name) is None
+
+
def test_copy_blob(test_blob):
    """Copying an object duplicates it within the same bucket."""
    bucket = storage.Client().bucket(test_blob.bucket.name)

    # Remove any leftover copy from a previous run.
    try:
        bucket.delete_blob("test_copy_blob")
    except google.cloud.exceptions.NotFound:
        pass

    storage_copy_file.copy_blob(
        bucket.name, test_blob.name, bucket.name, "test_copy_blob"
    )

    assert bucket.get_blob("test_copy_blob") is not None
    assert bucket.get_blob(test_blob.name) is not None
+
+
def test_versioning(test_bucket, capsys):
    """Enabling then disabling versioning updates both output and bucket state."""
    enabled = storage_enable_versioning.enable_versioning(test_bucket)
    captured = capsys.readouterr()
    assert "Versioning was enabled for bucket" in captured.out
    assert enabled.versioning_enabled is True

    disabled = storage_disable_versioning.disable_versioning(test_bucket)
    captured = capsys.readouterr()
    assert "Versioning was disabled for bucket" in captured.out
    assert disabled.versioning_enabled is False
+
+
def test_get_set_autoclass(new_bucket_obj, test_bucket, capsys):
    """Exercises the Autoclass samples: unset defaults, enable at creation, update, re-read."""
    # Test default values when Autoclass is unset
    bucket = storage_get_autoclass.get_autoclass(test_bucket.name)
    out, _ = capsys.readouterr()
    assert "Autoclass enabled is set to False" in out
    assert bucket.autoclass_toggle_time is None
    assert bucket.autoclass_terminal_storage_class_update_time is None

    # Test enabling Autoclass at bucket creation
    new_bucket_obj.autoclass_enabled = True
    bucket = storage.Client().create_bucket(new_bucket_obj)
    assert bucket.autoclass_enabled is True
    # A newly enabled bucket is expected to report NEARLINE as the terminal class.
    assert bucket.autoclass_terminal_storage_class == "NEARLINE"

    # Test set terminal_storage_class to ARCHIVE
    bucket = storage_set_autoclass.set_autoclass(bucket.name)
    out, _ = capsys.readouterr()
    assert "Autoclass enabled is set to True" in out
    assert bucket.autoclass_enabled is True
    assert bucket.autoclass_terminal_storage_class == "ARCHIVE"

    # Test get Autoclass
    bucket = storage_get_autoclass.get_autoclass(bucket.name)
    out, _ = capsys.readouterr()
    assert "Autoclass enabled is set to True" in out
    assert bucket.autoclass_toggle_time is not None
    assert bucket.autoclass_terminal_storage_class_update_time is not None
+
+
def test_bucket_lifecycle_management(test_bucket, capsys):
    """Lifecycle rules can be added and then removed via the samples."""
    with_rules = (
        storage_enable_bucket_lifecycle_management.enable_bucket_lifecycle_management(
            test_bucket
        )
    )
    captured = capsys.readouterr()
    assert "[]" in captured.out
    assert "Lifecycle management is enable" in captured.out
    assert len(list(with_rules.lifecycle_rules)) > 0

    without_rules = (
        storage_disable_bucket_lifecycle_management.disable_bucket_lifecycle_management(
            test_bucket
        )
    )
    captured = capsys.readouterr()
    assert "[]" in captured.out
    assert len(list(without_rules.lifecycle_rules)) == 0
+
+
def test_create_bucket_class_location(test_bucket_create):
    """The created bucket lands in US with the COLDLINE storage class."""
    created = storage_create_bucket_class_location.create_bucket_class_location(
        test_bucket_create.name
    )
    assert created.location == "US"
    assert created.storage_class == "COLDLINE"
+
+
def test_create_bucket_dual_region(test_bucket_create, capsys):
    """Creating a dual-region bucket reports the location and both regions."""
    location, region_1, region_2 = "US", "US-EAST1", "US-WEST1"
    storage_create_bucket_dual_region.create_bucket_dual_region(
        test_bucket_create.name, location, region_1, region_2
    )
    captured = capsys.readouterr()
    assert f"Created bucket {test_bucket_create.name}" in captured.out
    for token in (location, region_1, region_2, "dual-region"):
        assert token in captured.out
+
+
@pytest.mark.skipif(IS_PYTHON_3_14, reason="b/470276398")
def test_bucket_delete_default_kms_key(test_bucket, capsys):
    """Removing the default KMS key clears the bucket's setting."""
    # Give the bucket a key first so there is something to delete.
    test_bucket.default_kms_key_name = KMS_KEY
    test_bucket.patch()
    assert test_bucket.default_kms_key_name == KMS_KEY

    updated = storage_bucket_delete_default_kms_key.bucket_delete_default_kms_key(
        test_bucket.name
    )

    captured = capsys.readouterr()
    assert updated.default_kms_key_name is None
    assert updated.name in captured.out
+
+
def test_get_service_account(capsys):
    """The project's GCS service account address has the expected domain."""
    storage_get_service_account.get_service_account()
    captured = capsys.readouterr()
    assert "@gs-project-accounts.iam.gserviceaccount.com" in captured.out
+
+
@pytest.mark.xfail(reason="wait until b/469643064 is fixed")
def test_download_public_file(test_public_blob):
    """A public object can be downloaded without credentials."""
    storage_make_public.make_blob_public(
        test_public_blob.bucket.name, test_public_blob.name
    )
    with tempfile.NamedTemporaryFile() as dest_file:
        storage_download_public_file.download_public_file(
            test_public_blob.bucket.name, test_public_blob.name, dest_file.name
        )

        assert dest_file.read() == b"Hello, is it me you're looking for?"
+
+
def test_define_bucket_website_configuration(test_bucket):
    """The website main page and 404 page are stored on the bucket."""
    configured = storage_define_bucket_website_configuration.define_bucket_website_configuration(
        test_bucket.name, "index.html", "404.html"
    )
    expected = {"mainPageSuffix": "index.html", "notFoundPage": "404.html"}
    assert configured._properties["website"] == expected
+
+
@pytest.mark.skipif(IS_PYTHON_3_14, reason="b/470276398")
def test_object_get_kms_key(test_bucket):
    """The sample reports the KMS key that encrypts an uploaded object."""
    with tempfile.NamedTemporaryFile() as source_file:
        storage_upload_with_kms_key.upload_blob_with_kms(
            test_bucket.name,
            source_file.name,
            "test_upload_blob_encrypted",
            KMS_KEY,
        )

    kms_key = storage_object_get_kms_key.object_get_kms_key(
        test_bucket.name, "test_upload_blob_encrypted"
    )
    assert kms_key.startswith(KMS_KEY)
+
+
def test_storage_compose_file(test_bucket):
    """Composing two source objects concatenates their contents."""
    sources = ["test_upload_blob_1", "test_upload_blob_2"]
    for name in sources:
        test_bucket.blob(name).upload_from_string(name)

    with tempfile.NamedTemporaryFile() as dest_file:
        composed_blob = storage_compose_file.compose_file(
            test_bucket.name, sources[0], sources[1], dest_file.name
        )
        payload = composed_blob.download_as_bytes()

    assert payload.decode("utf-8") == sources[0] + sources[1]
+
+
def test_cors_configuration(test_bucket, capsys):
    """CORS policies can be set and then removed via the samples."""
    with_cors = storage_cors_configuration.cors_configuration(test_bucket)
    captured = capsys.readouterr()
    assert "Set CORS policies for bucket" in captured.out
    assert len(with_cors.cors) > 0

    without_cors = storage_remove_cors_configuration.remove_cors_configuration(
        test_bucket
    )
    captured = capsys.readouterr()
    assert "Remove CORS policies for bucket" in captured.out
    assert len(without_cors.cors) == 0
+
+
def test_delete_blobs_archived_generation(test_blob, capsys):
    """Deleting a specific generation removes that version of the object."""
    storage_delete_file_archived_generation.delete_file_archived_generation(
        test_blob.bucket.name, test_blob.name, test_blob.generation
    )
    captured = capsys.readouterr()
    assert f"blob {test_blob.name} was deleted" in captured.out
    deleted = test_blob.bucket.get_blob(test_blob.name, generation=test_blob.generation)
    assert deleted is None
+
+
def test_change_default_storage_class(test_bucket, capsys):
    """Changing the default storage class sets COLDLINE on the bucket."""
    updated = storage_change_default_storage_class.change_default_storage_class(
        test_bucket
    )
    captured = capsys.readouterr()
    assert "Default storage class for bucket" in captured.out
    assert updated.storage_class == "COLDLINE"
+
+
def test_change_file_storage_class(test_blob, capsys):
    """Rewriting an object changes its storage class to NEARLINE."""
    updated = storage_change_file_storage_class.change_file_storage_class(
        test_blob.bucket.name,
        test_blob.name,
    )
    captured = capsys.readouterr()
    assert f"Blob {updated.name} in bucket {updated.bucket.name}" in captured.out
    assert updated.storage_class == "NEARLINE"
+
+
def test_copy_file_archived_generation(test_blob):
    """Copying an archived generation creates the target without removing the source."""
    bucket = storage.Client().bucket(test_blob.bucket.name)

    # Remove any leftover copy from a previous run.
    try:
        bucket.delete_blob("test_copy_blob")
    except google.cloud.exceptions.NotFound:
        pass

    storage_copy_file_archived_generation.copy_file_archived_generation(
        bucket.name, test_blob.name, bucket.name, "test_copy_blob", test_blob.generation
    )

    assert bucket.get_blob("test_copy_blob") is not None
    assert bucket.get_blob(test_blob.name) is not None
+
+
def test_list_blobs_archived_generation(test_blob, capsys):
    """The archived-generation listing includes the fixture blob's generation."""
    storage_list_file_archived_generations.list_file_archived_generations(
        test_blob.bucket.name
    )
    captured = capsys.readouterr()
    assert str(test_blob.generation) in captured.out
+
+
def test_storage_configure_retries(test_blob, capsys):
    """The sample prints the customized retry configuration it applies."""
    storage_configure_retries.configure_retries(test_blob.bucket.name, test_blob.name)

    # This simply checks if the retry configurations were set and printed as intended.
    captured = capsys.readouterr()
    assert "The following library method is customized to be retried" in captured.out
    assert "_should_retry" in captured.out
    assert "initial=1.5, maximum=45.0, multiplier=1.2" in captured.out
    assert "500" in captured.out  # "deadline" or "timeout" depending on dependency ver.
+
+
def test_batch_request(test_bucket):
    """A batch request patches metadata on every object under the prefix."""
    blobs = [test_bucket.blob("b/1.txt"), test_bucket.blob("b/2.txt")]
    for blob in blobs:
        blob.upload_from_string("hello world")

    storage_batch_request.batch_request(test_bucket.name, "b/")

    for blob in blobs:
        blob.reload()
        assert blob.metadata.get("your-metadata-key") == "your-metadata-value"
+
+
def test_storage_set_client_endpoint(capsys):
    """The client reports the endpoint it was initialized with."""
    endpoint = "https://storage.googleapis.com"
    storage_set_client_endpoint.set_client_endpoint(endpoint)
    captured = capsys.readouterr()
    assert f"client initiated with endpoint: {endpoint}" in captured.out
+
+
def test_transfer_manager_snippets(test_bucket, capsys):
    """End-to-end transfer manager flow: upload many files (including nested
    directories), then download them twice — once via the whole-bucket sample
    and once via the explicit blob-name-list sample."""
    BLOB_NAMES = [
        "test.txt",
        "test2.txt",
        "blobs/test.txt",
        "blobs/nesteddir/test.txt",
    ]

    with tempfile.TemporaryDirectory() as uploads:
        # Create dirs and nested dirs
        for name in BLOB_NAMES:
            relpath = os.path.dirname(name)
            os.makedirs(os.path.join(uploads, relpath), exist_ok=True)

        # Create files with nested dirs to exercise directory handling.
        for name in BLOB_NAMES:
            with open(os.path.join(uploads, name), "w") as f:
                f.write(name)

        storage_transfer_manager_upload_many.upload_many_blobs_with_transfer_manager(
            test_bucket.name,
            BLOB_NAMES,
            source_directory="{}/".format(uploads),
            workers=8,
        )
        out, _ = capsys.readouterr()

        for name in BLOB_NAMES:
            assert "Uploaded {}".format(name) in out

    with tempfile.TemporaryDirectory() as downloads:
        # Download the files.
        storage_transfer_manager_download_bucket.download_bucket_with_transfer_manager(
            test_bucket.name,
            destination_directory=os.path.join(downloads, ""),
            workers=8,
            max_results=10000,
        )
        out, _ = capsys.readouterr()

        for name in BLOB_NAMES:
            assert "Downloaded {}".format(name) in out

    with tempfile.TemporaryDirectory() as downloads:
        # Download the files.
        storage_transfer_manager_download_many.download_many_blobs_with_transfer_manager(
            test_bucket.name,
            blob_names=BLOB_NAMES,
            destination_directory=os.path.join(downloads, ""),
            workers=8,
        )
        out, _ = capsys.readouterr()

        for name in BLOB_NAMES:
            assert "Downloaded {}".format(name) in out
+
+
def test_transfer_manager_directory_upload(test_bucket, capsys):
    """The directory-upload sample discovers and uploads every file in a tree,
    including files in nested subdirectories."""
    BLOB_NAMES = [
        "dirtest/test.txt",
        "dirtest/test2.txt",
        "dirtest/blobs/test.txt",
        "dirtest/blobs/nesteddir/test.txt",
    ]

    with tempfile.TemporaryDirectory() as uploads:
        # Create dirs and nested dirs
        for name in BLOB_NAMES:
            relpath = os.path.dirname(name)
            os.makedirs(os.path.join(uploads, relpath), exist_ok=True)

        # Create files with nested dirs to exercise directory handling.
        for name in BLOB_NAMES:
            with open(os.path.join(uploads, name), "w") as f:
                f.write(name)

        storage_transfer_manager_upload_directory.upload_directory_with_transfer_manager(
            test_bucket.name, source_directory="{}/".format(uploads)
        )
        out, _ = capsys.readouterr()

        # The sample first reports how many files it found, then each upload.
        assert "Found {}".format(len(BLOB_NAMES)) in out
        for name in BLOB_NAMES:
            assert "Uploaded {}".format(name) in out
+
+
def test_transfer_manager_download_chunks_concurrently(test_bucket, capsys):
    """Chunked concurrent download reproduces the uploaded object locally."""
    object_name = "test_file.txt"

    with tempfile.NamedTemporaryFile() as file:
        file.write(b"test")
        file.flush()

        storage_upload_file.upload_blob(test_bucket.name, file.name, object_name)

    with tempfile.TemporaryDirectory() as downloads:
        destination = os.path.join(downloads, object_name)
        storage_transfer_manager_download_chunks_concurrently.download_chunks_concurrently(
            test_bucket.name,
            object_name,
            destination,
            workers=8,
        )
        captured = capsys.readouterr()

        assert f"Downloaded {object_name} to {destination}" in captured.out
+
+
def test_transfer_manager_upload_chunks_concurrently(test_bucket, capsys):
    """Chunked concurrent upload reports the source file and blob name."""
    object_name = "test_file.txt"

    with tempfile.NamedTemporaryFile() as file:
        file.write(b"test")
        file.flush()

        storage_transfer_manager_upload_chunks_concurrently.upload_chunks_concurrently(
            test_bucket.name, file.name, object_name
        )

        captured = capsys.readouterr()
        assert f"File {file.name} uploaded to {object_name}" in captured.out
+
+
def test_object_retention_policy(test_bucket_create, capsys):
    """Object retention can be enabled at bucket creation and set per object."""
    storage_create_bucket_object_retention.create_bucket_object_retention(
        test_bucket_create.name
    )
    captured = capsys.readouterr()
    assert (
        f"Created bucket {test_bucket_create.name} with object retention enabled setting"
        in captured.out
    )

    object_name = "test_object_retention"
    storage_set_object_retention_policy.set_object_retention_policy(
        test_bucket_create.name, "hello world", object_name
    )
    captured = capsys.readouterr()
    assert f"Retention policy for file {object_name}" in captured.out

    # Remove retention policy for test cleanup
    blob = test_bucket_create.blob(object_name)
    blob.retention.mode = None
    blob.retention.retain_until_time = None
    blob.patch(override_unlocked_retention=True)
+
+
def test_create_bucket_hierarchical_namespace(test_bucket_create, capsys):
    """Creating a bucket with hierarchical namespace prints a confirmation."""
    storage_create_bucket_hierarchical_namespace.create_bucket_hierarchical_namespace(
        test_bucket_create.name
    )
    captured = capsys.readouterr()
    assert (
        f"Created bucket {test_bucket_create.name} with hierarchical namespace enabled"
        in captured.out
    )
+
+
def test_storage_trace_quickstart(test_bucket, capsys):
    """The tracing quickstart uploads and downloads an object, logging both."""
    object_name = f"trace_quickstart_{uuid.uuid4().hex}"
    payload = "The quick brown fox jumps over the lazy dog."
    storage_trace_quickstart.run_quickstart(test_bucket.name, object_name, payload)
    captured = capsys.readouterr()

    assert f"{object_name} uploaded to {test_bucket.name}" in captured.out
    assert (
        f"Downloaded storage object {object_name} from bucket {test_bucket.name}"
        in captured.out
    )
+
+
def test_storage_disable_soft_delete(test_soft_delete_enabled_bucket, capsys):
    """Disabling soft delete prints a confirmation for the bucket."""
    name = test_soft_delete_enabled_bucket.name
    storage_disable_soft_delete.disable_soft_delete(name)
    captured = capsys.readouterr()
    assert f"Soft-delete policy is disabled for bucket {name}" in captured.out
+
+
def test_storage_get_soft_delete_policy(test_soft_delete_enabled_bucket, capsys):
    """The policy sample reports enabled state, then disabled after zeroing retention."""
    name = test_soft_delete_enabled_bucket.name
    storage_get_soft_delete_policy.get_soft_delete_policy(name)
    captured = capsys.readouterr()
    assert f"Soft-delete policy for {name}" in captured.out
    assert "Object soft-delete policy is enabled" in captured.out
    assert "Object retention duration: " in captured.out
    assert "Policy effective time: " in captured.out

    # Disable the soft-delete policy and confirm the sample reports that too.
    test_soft_delete_enabled_bucket.soft_delete_policy.retention_duration_seconds = 0
    test_soft_delete_enabled_bucket.patch()
    storage_get_soft_delete_policy.get_soft_delete_policy(name)
    captured = capsys.readouterr()
    assert f"Soft-delete policy for {name}" in captured.out
    assert "Object soft-delete policy is disabled" in captured.out
+
+
def test_storage_set_soft_delete_policy(test_soft_delete_enabled_bucket, capsys):
    """Setting a 10-day soft-delete retention prints the new duration."""
    name = test_soft_delete_enabled_bucket.name
    ten_days = 10 * 24 * 60 * 60  # seconds
    storage_set_soft_delete_policy.set_soft_delete_policy(name, ten_days)
    captured = capsys.readouterr()
    assert (
        f"Soft delete policy for bucket {name} was set to {ten_days} seconds retention period"
        in captured.out
    )
+
+
def test_storage_list_soft_deleted_objects(test_soft_delete_enabled_bucket, capsys):
    """A soft-deleted object appears in the soft-deleted listing."""
    object_name = f"test_object_{uuid.uuid4().hex}.txt"
    blob = test_soft_delete_enabled_bucket.blob(object_name)
    blob.upload_from_string("This object will be soft-deleted for listing.")
    generation = blob.generation

    blob.delete()  # Soft-delete the object
    storage_list_soft_deleted_objects.list_soft_deleted_objects(
        test_soft_delete_enabled_bucket.name
    )
    captured = capsys.readouterr()
    assert f"Name: {object_name}, Generation: {generation}" in captured.out
+
+
def test_storage_list_soft_deleted_object_versions(
    test_soft_delete_enabled_bucket, capsys
):
    """A soft-deleted object's generation appears in the version listing."""
    object_name = f"test_object_{uuid.uuid4().hex}.txt"
    blob = test_soft_delete_enabled_bucket.blob(object_name)
    blob.upload_from_string("This object will be soft-deleted for version listing.")
    generation = blob.generation

    blob.delete()  # Soft-delete the object
    storage_list_soft_deleted_object_versions.list_soft_deleted_object_versions(
        test_soft_delete_enabled_bucket.name, object_name
    )
    captured = capsys.readouterr()
    assert f"Version ID: {generation}" in captured.out
+
+
def test_storage_restore_soft_deleted_object(test_soft_delete_enabled_bucket, capsys):
    """A soft-deleted object can be restored by name and generation."""
    bucket_name = test_soft_delete_enabled_bucket.name
    object_name = f"test-restore-sd-obj-{uuid.uuid4().hex}.txt"
    blob = test_soft_delete_enabled_bucket.blob(object_name)
    blob.upload_from_string("This object will be soft-deleted and restored.")
    generation = blob.generation

    blob.delete()  # Soft-delete the object
    storage_restore_object.restore_soft_deleted_object(
        bucket_name, object_name, generation
    )
    captured = capsys.readouterr()
    assert (
        f"Soft-deleted object {object_name} is restored in the bucket {bucket_name}"
        in captured.out
    )

    # Verify the restoration
    assert test_soft_delete_enabled_bucket.get_blob(object_name) is not None
+
+
def test_move_object(test_blob):
    """Atomic move renames the object; the old name no longer resolves."""
    bucket = test_blob.bucket

    # Remove any leftover target from a previous run.
    try:
        bucket.delete_blob("test_move_blob_atomic")
    except google.cloud.exceptions.NotFound:
        print(f"test_move_blob_atomic not found in bucket {bucket.name}")

    storage_move_file_atomically.move_object(
        bucket.name, test_blob.name, "test_move_blob_atomic"
    )

    assert bucket.get_blob("test_move_blob_atomic") is not None
    assert bucket.get_blob(test_blob.name) is None
diff --git a/storage/samples/snippets/storage_activate_hmac_key.py b/storage/samples/snippets/storage_activate_hmac_key.py
new file mode 100644
index 00000000000..d3960eb622c
--- /dev/null
+++ b/storage/samples/snippets/storage_activate_hmac_key.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_activate_hmac_key]
+from google.cloud import storage
+
+
+def activate_key(access_id, project_id):
+    """
+    Activate the HMAC key with the given access ID.
+
+    Args:
+        access_id: Access ID of an inactive HMAC key.
+        project_id: Google Cloud project ID that owns the key.
+
+    Returns:
+        The updated HMACKeyMetadata object (state set to "ACTIVE").
+    """
+    # project_id = "Your Google Cloud project ID"
+    # access_id = "ID of an inactive HMAC key"
+
+    storage_client = storage.Client(project=project_id)
+
+    hmac_key = storage_client.get_hmac_key_metadata(
+        access_id, project_id=project_id
+    )
+    # Flip the key's state and persist the change server-side.
+    hmac_key.state = "ACTIVE"
+    hmac_key.update()
+
+    print("The HMAC key metadata is:")
+    print(f"Service Account Email: {hmac_key.service_account_email}")
+    print(f"Key ID: {hmac_key.id}")
+    print(f"Access ID: {hmac_key.access_id}")
+    print(f"Project ID: {hmac_key.project}")
+    print(f"State: {hmac_key.state}")
+    print(f"Created At: {hmac_key.time_created}")
+    print(f"Updated At: {hmac_key.updated}")
+    print(f"Etag: {hmac_key.etag}")
+    return hmac_key
+
+
+# [END storage_activate_hmac_key]
+
+if __name__ == "__main__":
+    activate_key(access_id=sys.argv[1], project_id=sys.argv[2])
diff --git a/storage/samples/snippets/storage_add_bucket_conditional_iam_binding.py b/storage/samples/snippets/storage_add_bucket_conditional_iam_binding.py
new file mode 100644
index 00000000000..d09f528cf72
--- /dev/null
+++ b/storage/samples/snippets/storage_add_bucket_conditional_iam_binding.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_add_bucket_conditional_iam_binding]
+from google.cloud import storage
+
+
+def add_bucket_conditional_iam_binding(
+    bucket_name, role, title, description, expression, members
+):
+    """Add a conditional IAM binding to a bucket's IAM policy.
+
+    Args:
+        bucket_name: Name of the target bucket.
+        role: IAM role for the binding, e.g. "roles/storage.objectViewer".
+        title: Short title for the condition.
+        description: Human-readable description of the condition.
+        expression: Condition expression that gates the binding.
+        members: Set of IAM identities, e.g. {"user:name@example.com"}.
+    """
+    # bucket_name = "your-bucket-name"
+    # role = "IAM role, e.g. roles/storage.objectViewer"
+    # members = {"IAM identity, e.g. user:name@example.com"}
+    # title = "Condition title."
+    # description = "Condition description."
+    # expression = "Condition expression."
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+
+    policy = bucket.get_iam_policy(requested_policy_version=3)
+
+    # Set the policy's version to 3 to use condition in bindings.
+    policy.version = 3
+
+    # Append one binding pairing the role and members with the condition.
+    policy.bindings.append(
+        {
+            "role": role,
+            "members": members,
+            "condition": {
+                "title": title,
+                "description": description,
+                "expression": expression,
+            },
+        }
+    )
+
+    bucket.set_iam_policy(policy)
+
+    print(f"Added the following member(s) with role {role} to {bucket_name}:")
+
+    for member in members:
+        print(f"  {member}")
+
+    print("with condition:")
+    print(f"  Title: {title}")
+    print(f"  Description: {description}")
+    print(f"  Expression: {expression}")
+
+
+# [END storage_add_bucket_conditional_iam_binding]
+
+
+if __name__ == "__main__":
+    add_bucket_conditional_iam_binding(
+        bucket_name=sys.argv[1],
+        role=sys.argv[2],
+        title=sys.argv[3],
+        description=sys.argv[4],
+        expression=sys.argv[5],
+        members=set(sys.argv[6::]),
+    )
diff --git a/storage/samples/snippets/storage_add_bucket_default_owner.py b/storage/samples/snippets/storage_add_bucket_default_owner.py
new file mode 100644
index 00000000000..932b1328f3f
--- /dev/null
+++ b/storage/samples/snippets/storage_add_bucket_default_owner.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_add_bucket_default_owner]
+from google.cloud import storage
+
+
+def add_bucket_default_owner(bucket_name, user_email):
+    """Adds a user as an owner in the given bucket's default object access
+    control list."""
+    # bucket_name = "your-bucket-name"
+    # user_email = "name@example.com"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+
+    # Reload fetches the current ACL from Cloud Storage.
+    # NOTE(review): this reloads `bucket.acl`, while the lines below edit
+    # `bucket.default_object_acl` — confirm whether the default object ACL
+    # also needs its own reload before saving.
+    bucket.acl.reload()
+
+    # You can also use `group`, `domain`, `all_authenticated` and `all` to
+    # grant access to different types of entities. You can also use
+    # `grant_read` or `grant_write` to grant different roles.
+    bucket.default_object_acl.user(user_email).grant_owner()
+    bucket.default_object_acl.save()
+
+    print(
+        "Added user {} as an owner in the default acl on bucket {}.".format(
+            user_email, bucket_name
+        )
+    )
+
+
+# [END storage_add_bucket_default_owner]
+
+if __name__ == "__main__":
+    add_bucket_default_owner(bucket_name=sys.argv[1], user_email=sys.argv[2])
diff --git a/storage/samples/snippets/storage_add_bucket_iam_member.py b/storage/samples/snippets/storage_add_bucket_iam_member.py
new file mode 100644
index 00000000000..0d610eae7ce
--- /dev/null
+++ b/storage/samples/snippets/storage_add_bucket_iam_member.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_add_bucket_iam_member]
+from google.cloud import storage
+
+
+def add_bucket_iam_member(bucket_name, role, member):
+    """Add a new member to an IAM Policy"""
+    # bucket_name = "your-bucket-name"
+    # role = "IAM role, e.g., roles/storage.objectViewer"
+    # member = "IAM identity, e.g., user:name@example.com"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+
+    # Request policy version 3 so existing conditional bindings round-trip.
+    policy = bucket.get_iam_policy(requested_policy_version=3)
+
+    # Each binding pairs one role with a set of members.
+    policy.bindings.append({"role": role, "members": {member}})
+
+    bucket.set_iam_policy(policy)
+
+    print(f"Added {member} with role {role} to {bucket_name}.")
+
+
+# [END storage_add_bucket_iam_member]
+
+
+if __name__ == "__main__":
+    add_bucket_iam_member(bucket_name=sys.argv[1], role=sys.argv[2], member=sys.argv[3])
diff --git a/storage/samples/snippets/storage_add_bucket_label.py b/storage/samples/snippets/storage_add_bucket_label.py
new file mode 100644
index 00000000000..9c6fcff7af3
--- /dev/null
+++ b/storage/samples/snippets/storage_add_bucket_label.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# [START storage_add_bucket_label]
+import pprint
+# [END storage_add_bucket_label]
+import sys
+# [START storage_add_bucket_label]
+
+from google.cloud import storage
+
+
+def add_bucket_label(bucket_name):
+    """Add a label to a bucket."""
+    # bucket_name = "your-bucket-name"
+
+    storage_client = storage.Client()
+
+    bucket = storage_client.get_bucket(bucket_name)
+    # Read-modify-write: fetch the current labels, add one, assign the whole
+    # mapping back, then PATCH the bucket to persist the change.
+    labels = bucket.labels
+    labels["example"] = "label"
+    bucket.labels = labels
+    bucket.patch()
+
+    print(f"Updated labels on {bucket.name}.")
+    pprint.pprint(bucket.labels)
+
+
+# [END storage_add_bucket_label]
+
+if __name__ == "__main__":
+    add_bucket_label(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_add_bucket_owner.py b/storage/samples/snippets/storage_add_bucket_owner.py
new file mode 100644
index 00000000000..bac1f3f6440
--- /dev/null
+++ b/storage/samples/snippets/storage_add_bucket_owner.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_add_bucket_owner]
+from google.cloud import storage
+
+
+def add_bucket_owner(bucket_name, user_email):
+    """Adds a user as an owner on the given bucket."""
+    # bucket_name = "your-bucket-name"
+    # user_email = "name@example.com"
+
+    storage_client = storage.Client()
+
+    bucket = storage_client.bucket(bucket_name)
+
+    # Reload fetches the current ACL from Cloud Storage.
+    bucket.acl.reload()
+
+    # You can also use `group()`, `domain()`, `all_authenticated()` and `all()`
+    # to grant access to different types of entities.
+    # You can also use `grant_read()` or `grant_write()` to grant different
+    # roles.
+    bucket.acl.user(user_email).grant_owner()
+    # Persist the modified ACL back to the bucket.
+    bucket.acl.save()
+
+    print(
+        f"Added user {user_email} as an owner on bucket {bucket_name}."
+    )
+
+
+# [END storage_add_bucket_owner]
+
+if __name__ == "__main__":
+    add_bucket_owner(bucket_name=sys.argv[1], user_email=sys.argv[2])
diff --git a/storage/samples/snippets/storage_add_file_owner.py b/storage/samples/snippets/storage_add_file_owner.py
new file mode 100644
index 00000000000..9e9342590c4
--- /dev/null
+++ b/storage/samples/snippets/storage_add_file_owner.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_add_file_owner]
+from google.cloud import storage
+
+
+def add_blob_owner(bucket_name, blob_name, user_email):
+    """Adds a user as an owner on the given blob."""
+    # bucket_name = "your-bucket-name"
+    # blob_name = "your-object-name"
+    # user_email = "name@example.com"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+    blob = bucket.blob(blob_name)
+
+    # Reload fetches the current ACL from Cloud Storage.
+    blob.acl.reload()
+
+    # You can also use `group`, `domain`, `all_authenticated` and `all` to
+    # grant access to different types of entities. You can also use
+    # `grant_read` or `grant_write` to grant different roles.
+    blob.acl.user(user_email).grant_owner()
+    # Persist the modified ACL back to the object.
+    blob.acl.save()
+
+    print(
+        "Added user {} as an owner on blob {} in bucket {}.".format(
+            user_email, blob_name, bucket_name
+        )
+    )
+
+
+# [END storage_add_file_owner]
+
+if __name__ == "__main__":
+    add_blob_owner(
+        bucket_name=sys.argv[1], blob_name=sys.argv[2], user_email=sys.argv[3],
+    )
diff --git a/storage/samples/snippets/storage_async_download.py b/storage/samples/snippets/storage_async_download.py
new file mode 100755
index 00000000000..ed8f3f304f9
--- /dev/null
+++ b/storage/samples/snippets/storage_async_download.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import asyncio
+import argparse
+
+"""Sample that asynchronously downloads multiple files from GCS to application's memory.
+"""
+
+
+# [START storage_async_download]
+# This sample can be run by calling
+# `asyncio.run(async_download_blobs('bucket_name', 'file1', 'file2'))`
+async def async_download_blobs(bucket_name, *file_names):
+    """Downloads a number of files in parallel from the bucket.
+    """
+    # The ID of your GCS bucket.
+    # bucket_name = "your-bucket-name"
+
+    # The names of files to download, passed as separate positional arguments
+    # (collected into *file_names). These objects should be present in bucket.
+
+    import asyncio
+    from google.cloud import storage
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+
+    loop = asyncio.get_running_loop()
+
+    tasks = []
+    for file_name in file_names:
+        blob = bucket.blob(file_name)
+        # The first arg, None, tells it to use the default loop's executor
+        tasks.append(loop.run_in_executor(None, blob.download_as_bytes))
+
+    # If the method returns a value (such as download_as_bytes), gather will return the values
+    _ = await asyncio.gather(*tasks)
+    for file_name in file_names:
+        print(f"Downloaded storage object {file_name}")
+
+
+# [END storage_async_download]
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-b', '--bucket_name', type=str, dest='bucket_name', help='provide the name of the GCS bucket')
+ parser.add_argument(
+ '-f', '--file_name',
+ action='append',
+ type=str,
+ dest='file_names',
+ help='Example: -f file1.txt or --file_name my_fav.mp4 . It can be used multiple times.'
+ )
+ args = parser.parse_args()
+
+ asyncio.run(async_download_blobs(args.bucket_name, *args.file_names))
diff --git a/storage/samples/snippets/storage_async_upload.py b/storage/samples/snippets/storage_async_upload.py
new file mode 100644
index 00000000000..25aabb63ee4
--- /dev/null
+++ b/storage/samples/snippets/storage_async_upload.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import asyncio
+import sys
+
+
+"""Sample that asynchronously uploads a file to GCS
+"""
+
+
+# [START storage_async_upload]
+# This sample can be run by calling `asyncio.run(async_upload_blob('bucket_name'))`
+async def async_upload_blob(bucket_name):
+    """Uploads a number of files in parallel to the bucket."""
+    # The ID of your GCS bucket
+    # bucket_name = "your-bucket-name"
+    import asyncio
+    from functools import partial
+    from google.cloud import storage
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+
+    loop = asyncio.get_running_loop()
+
+    tasks = []
+    count = 3
+    for x in range(count):
+        blob_name = f"async_sample_blob_{x}"
+        content = f"Hello world #{x}"
+        blob = bucket.blob(blob_name)
+        # The first arg, None, tells it to use the default loop's executor.
+        # partial() binds the upload content since run_in_executor takes no kwargs.
+        tasks.append(loop.run_in_executor(None, partial(blob.upload_from_string, content)))
+
+    # If the method returns a value (such as download_as_string), gather will return the values
+    await asyncio.gather(*tasks)
+
+    print(f"Uploaded {count} files to bucket {bucket_name}")
+
+
+# [END storage_async_upload]
+
+
+if __name__ == "__main__":
+    asyncio.run(async_upload_blob(
+        bucket_name=sys.argv[1]
+    ))
diff --git a/storage/samples/snippets/storage_batch_request.py b/storage/samples/snippets/storage_batch_request.py
new file mode 100644
index 00000000000..7fe11fb1cf7
--- /dev/null
+++ b/storage/samples/snippets/storage_batch_request.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+"""Sample that uses a batch request.
+This sample is used on this page:
+ https://cloud.google.com/storage/docs/batch
+For more information, see README.md.
+"""
+
+# [START storage_batch_request]
+
+from google.cloud import storage
+
+
+def batch_request(bucket_name, prefix=None):
+ """
+ Use a batch request to patch a list of objects with the given prefix in a bucket.
+
+ Note that Cloud Storage does not support batch operations for uploading or downloading.
+ Additionally, the current batch design does not support library methods whose return values
+ depend on the response payload.
+ See https://cloud.google.com/python/docs/reference/storage/latest/google.cloud.storage.batch
+ """
+ # The ID of your GCS bucket
+ # bucket_name = "my-bucket"
+ # The prefix of the object paths
+ # prefix = "directory-prefix/"
+
+ client = storage.Client()
+ bucket = client.bucket(bucket_name)
+
+ # Accumulate in a list the objects with a given prefix.
+ blobs_to_patch = [blob for blob in bucket.list_blobs(prefix=prefix)]
+
+ # Use a batch context manager to edit metadata in the list of blobs.
+ # The batch request is sent out when the context manager closes.
+ # No more than 100 calls should be included in a single batch request.
+ with client.batch():
+ for blob in blobs_to_patch:
+ metadata = {"your-metadata-key": "your-metadata-value"}
+ blob.metadata = metadata
+ blob.patch()
+
+ print(
+ f"Batch request edited metadata for all objects with the given prefix in {bucket.name}."
+ )
+
+
+# [END storage_batch_request]
+
+if __name__ == "__main__":
+ batch_request(bucket_name=sys.argv[1], prefix=sys.argv[2])
diff --git a/storage/samples/snippets/storage_bucket_delete_default_kms_key.py b/storage/samples/snippets/storage_bucket_delete_default_kms_key.py
new file mode 100644
index 00000000000..0db29375699
--- /dev/null
+++ b/storage/samples/snippets/storage_bucket_delete_default_kms_key.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_bucket_delete_default_kms_key]
+from google.cloud import storage
+
+
+def bucket_delete_default_kms_key(bucket_name):
+    """Delete a default KMS key of bucket"""
+    # bucket_name = "your-bucket-name"
+
+    storage_client = storage.Client()
+
+    bucket = storage_client.get_bucket(bucket_name)
+    # Setting the property to None and patching clears the bucket's
+    # default KMS key.
+    bucket.default_kms_key_name = None
+    bucket.patch()
+
+    print(f"Default KMS key was removed from {bucket.name}")
+    return bucket
+
+
+# [END storage_bucket_delete_default_kms_key]
+
+if __name__ == "__main__":
+    bucket_delete_default_kms_key(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_change_default_storage_class.py b/storage/samples/snippets/storage_change_default_storage_class.py
new file mode 100644
index 00000000000..5d2f924ade7
--- /dev/null
+++ b/storage/samples/snippets/storage_change_default_storage_class.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_change_default_storage_class]
+from google.cloud import storage
+from google.cloud.storage import constants
+
+
+def change_default_storage_class(bucket_name):
+    """Change the default storage class of the bucket"""
+    # bucket_name = "your-bucket-name"
+
+    storage_client = storage.Client()
+
+    bucket = storage_client.get_bucket(bucket_name)
+    # Use the library-provided constant rather than the raw string "COLDLINE".
+    bucket.storage_class = constants.COLDLINE_STORAGE_CLASS
+    bucket.patch()
+
+    print(f"Default storage class for bucket {bucket_name} has been set to {bucket.storage_class}")
+    return bucket
+
+
+# [END storage_change_default_storage_class]
+
+if __name__ == "__main__":
+    change_default_storage_class(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_change_file_storage_class.py b/storage/samples/snippets/storage_change_file_storage_class.py
new file mode 100644
index 00000000000..a976ac8a4c8
--- /dev/null
+++ b/storage/samples/snippets/storage_change_file_storage_class.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_change_file_storage_class]
+from google.cloud import storage
+
+
+def change_file_storage_class(bucket_name, blob_name):
+ """Change the default storage class of the blob"""
+ # bucket_name = "your-bucket-name"
+ # blob_name = "your-object-name"
+
+ storage_client = storage.Client()
+
+ bucket = storage_client.bucket(bucket_name)
+ blob = bucket.blob(blob_name)
+ generation_match_precondition = None
+
+ # Optional: set a generation-match precondition to avoid potential race
+ # conditions and data corruptions. The request is aborted if the
+ # object's generation number does not match your precondition.
+ blob.reload() # Fetch blob metadata to use in generation_match_precondition.
+ generation_match_precondition = blob.generation
+
+ blob.update_storage_class("NEARLINE", if_generation_match=generation_match_precondition)
+
+ print(
+ "Blob {} in bucket {} had its storage class set to {}".format(
+ blob_name,
+ bucket_name,
+ blob.storage_class
+ )
+ )
+ return blob
+# [END storage_change_file_storage_class]
+
+
+if __name__ == "__main__":
+ change_file_storage_class(bucket_name=sys.argv[1], blob_name=sys.argv[2])
diff --git a/storage/samples/snippets/storage_compose_file.py b/storage/samples/snippets/storage_compose_file.py
new file mode 100644
index 00000000000..e673912725b
--- /dev/null
+++ b/storage/samples/snippets/storage_compose_file.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_compose_file]
+from google.cloud import storage
+
+
+def compose_file(bucket_name, first_blob_name, second_blob_name, destination_blob_name):
+    """Concatenate source blobs into destination blob.
+
+    Args:
+        bucket_name: Bucket containing both source objects and the destination.
+        first_blob_name: Name of the first source object.
+        second_blob_name: Name of the second source object.
+        destination_blob_name: Name of the composite object to create.
+
+    Returns:
+        The destination Blob object.
+    """
+    # bucket_name = "your-bucket-name"
+    # first_blob_name = "first-object-name"
+    # second_blob_name = "second-blob-name"
+    # destination_blob_name = "destination-object-name"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+    destination = bucket.blob(destination_blob_name)
+    destination.content_type = "text/plain"
+
+    # Note sources is a list of Blob instances, up to the max of 32 instances per request
+    sources = [bucket.blob(first_blob_name), bucket.blob(second_blob_name)]
+
+    # Optional: set a generation-match precondition to avoid potential race conditions
+    # and data corruptions. The request to compose is aborted if the object's
+    # generation number does not match your precondition. For a destination
+    # object that does not yet exist, set the if_generation_match precondition to 0.
+    # If the destination object already exists in your bucket, set instead a
+    # generation-match precondition using its generation number.
+    # There is also an `if_source_generation_match` parameter, which is not used in this example.
+    destination_generation_match_precondition = 0
+
+    destination.compose(sources, if_generation_match=destination_generation_match_precondition)
+
+    print(
+        "New composite object {} in the bucket {} was created by combining {} and {}".format(
+            destination_blob_name, bucket_name, first_blob_name, second_blob_name
+        )
+    )
+    return destination
+
+
+# [END storage_compose_file]
+
+if __name__ == "__main__":
+    compose_file(
+        bucket_name=sys.argv[1],
+        first_blob_name=sys.argv[2],
+        second_blob_name=sys.argv[3],
+        destination_blob_name=sys.argv[4],
+    )
diff --git a/storage/samples/snippets/storage_configure_retries.py b/storage/samples/snippets/storage_configure_retries.py
new file mode 100644
index 00000000000..25c2529a42e
--- /dev/null
+++ b/storage/samples/snippets/storage_configure_retries.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+"""Sample that configures retries on an operation call.
+This sample is used on this page:
+ https://cloud.google.com/storage/docs/retry-strategy
+For more information, see README.md.
+"""
+
+# [START storage_configure_retries]
+from google.cloud import storage
+from google.cloud.storage.retry import DEFAULT_RETRY
+
+
+def configure_retries(bucket_name, blob_name):
+    """Configures retries with customizations.
+
+    Deletes the given blob using a customized retry policy derived from the
+    library's DEFAULT_RETRY.
+
+    Args:
+        bucket_name: Name of the bucket containing the object.
+        blob_name: Name of the object to delete.
+    """
+    # The ID of your GCS bucket
+    # bucket_name = "your-bucket-name"
+    # The ID of your GCS object
+    # blob_name = "your-object-name"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+    blob = bucket.blob(blob_name)
+
+    # Customize retry with a timeout of 500 seconds (default=120 seconds).
+    modified_retry = DEFAULT_RETRY.with_timeout(500.0)
+    # Customize retry with an initial wait time of 1.5 (default=1.0).
+    # Customize retry with a wait time multiplier per iteration of 1.2 (default=2.0).
+    # Customize retry with a maximum wait time of 45.0 (default=60.0).
+    modified_retry = modified_retry.with_delay(initial=1.5, multiplier=1.2, maximum=45.0)
+
+    # blob.delete() uses DEFAULT_RETRY by default.
+    # Pass in modified_retry to override the default retry behavior.
+    print(
+        f"The following library method is customized to be retried according to the following configurations: {modified_retry}"
+    )
+
+    blob.delete(retry=modified_retry)
+    print(f"Blob {blob_name} deleted with a customized retry strategy.")
+
+
+# [END storage_configure_retries]
+
+
+if __name__ == "__main__":
+    configure_retries(bucket_name=sys.argv[1], blob_name=sys.argv[2])
diff --git a/storage/samples/snippets/storage_copy_file.py b/storage/samples/snippets/storage_copy_file.py
new file mode 100644
index 00000000000..b802de28b1b
--- /dev/null
+++ b/storage/samples/snippets/storage_copy_file.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_copy_file]
+from google.cloud import storage
+
+
+def copy_blob(
+    bucket_name, blob_name, destination_bucket_name, destination_blob_name,
+):
+    """Copies a blob from one bucket to another with a new name.
+
+    Args:
+        bucket_name: name of the bucket holding the source object.
+        blob_name: name of the source object.
+        destination_bucket_name: name of the bucket to copy into.
+        destination_blob_name: name to give the copied object.
+    """
+    # bucket_name = "your-bucket-name"
+    # blob_name = "your-object-name"
+    # destination_bucket_name = "destination-bucket-name"
+    # destination_blob_name = "destination-object-name"
+
+    storage_client = storage.Client()
+
+    source_bucket = storage_client.bucket(bucket_name)
+    source_blob = source_bucket.blob(blob_name)
+    destination_bucket = storage_client.bucket(destination_bucket_name)
+
+    # Optional: set a generation-match precondition to avoid potential race conditions
+    # and data corruptions. The request to copy is aborted if the object's
+    # generation number does not match your precondition. For a destination
+    # object that does not yet exist, set the if_generation_match precondition to 0.
+    # If the destination object already exists in your bucket, set instead a
+    # generation-match precondition using its generation number.
+    # There is also an `if_source_generation_match` parameter, which is not used in this example.
+    destination_generation_match_precondition = 0
+
+    blob_copy = source_bucket.copy_blob(
+        source_blob, destination_bucket, destination_blob_name, if_generation_match=destination_generation_match_precondition,
+    )
+
+    print(
+        "Blob {} in bucket {} copied to blob {} in bucket {}.".format(
+            source_blob.name,
+            source_bucket.name,
+            blob_copy.name,
+            destination_bucket.name,
+        )
+    )
+
+
+# [END storage_copy_file]
+
+if __name__ == "__main__":
+    # CLI: python <script> <src_bucket> <src_object> <dst_bucket> <dst_object>
+    copy_blob(
+        bucket_name=sys.argv[1],
+        blob_name=sys.argv[2],
+        destination_bucket_name=sys.argv[3],
+        destination_blob_name=sys.argv[4],
+    )
diff --git a/storage/samples/snippets/storage_copy_file_archived_generation.py b/storage/samples/snippets/storage_copy_file_archived_generation.py
new file mode 100644
index 00000000000..419d8e5a369
--- /dev/null
+++ b/storage/samples/snippets/storage_copy_file_archived_generation.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_copy_file_archived_generation]
+from google.cloud import storage
+
+
+def copy_file_archived_generation(
+ bucket_name, blob_name, destination_bucket_name, destination_blob_name, generation
+):
+ """Copies a blob from one bucket to another with a new name with the same generation."""
+ # bucket_name = "your-bucket-name"
+ # blob_name = "your-object-name"
+ # destination_bucket_name = "destination-bucket-name"
+ # destination_blob_name = "destination-object-name"
+ # generation = 1579287380533984
+
+ storage_client = storage.Client()
+
+ source_bucket = storage_client.bucket(bucket_name)
+ source_blob = source_bucket.blob(blob_name)
+ destination_bucket = storage_client.bucket(destination_bucket_name)
+
+ # Optional: set a generation-match precondition to avoid potential race conditions
+ # and data corruptions. The request to copy is aborted if the object's
+ # generation number does not match your precondition. For a destination
+ # object that does not yet exist, set the if_generation_match precondition to 0.
+ # If the destination object already exists in your bucket, set instead a
+ # generation-match precondition using its generation number.
+ destination_generation_match_precondition = 0
+
+ # source_generation selects a specific revision of the source object, as opposed to the latest version.
+ blob_copy = source_bucket.copy_blob(
+ source_blob, destination_bucket, destination_blob_name, source_generation=generation, if_generation_match=destination_generation_match_precondition
+ )
+
+ print(
+ "Generation {} of the blob {} in bucket {} copied to blob {} in bucket {}.".format(
+ generation,
+ source_blob.name,
+ source_bucket.name,
+ blob_copy.name,
+ destination_bucket.name,
+ )
+ )
+
+
+# [END storage_copy_file_archived_generation]
+
+if __name__ == "__main__":
+ copy_file_archived_generation(
+ bucket_name=sys.argv[1],
+ blob_name=sys.argv[2],
+ destination_bucket_name=sys.argv[3],
+ destination_blob_name=sys.argv[4],
+ generation=sys.argv[5]
+ )
diff --git a/storage/samples/snippets/storage_cors_configuration.py b/storage/samples/snippets/storage_cors_configuration.py
new file mode 100644
index 00000000000..2c5dd242870
--- /dev/null
+++ b/storage/samples/snippets/storage_cors_configuration.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_cors_configuration]
+from google.cloud import storage
+
+
+def cors_configuration(bucket_name):
+ """Set a bucket's CORS policies configuration."""
+ # bucket_name = "your-bucket-name"
+
+ storage_client = storage.Client()
+ bucket = storage_client.get_bucket(bucket_name)
+ bucket.cors = [
+ {
+ "origin": ["*"],
+ "responseHeader": [
+ "Content-Type",
+ "x-goog-resumable"],
+ "method": ['PUT', 'POST'],
+ "maxAgeSeconds": 3600
+ }
+ ]
+ bucket.patch()
+
+ print(f"Set CORS policies for bucket {bucket.name} is {bucket.cors}")
+ return bucket
+
+
+# [END storage_cors_configuration]
+
+if __name__ == "__main__":
+ cors_configuration(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_create_bucket.py b/storage/samples/snippets/storage_create_bucket.py
new file mode 100644
index 00000000000..c95f32f569b
--- /dev/null
+++ b/storage/samples/snippets/storage_create_bucket.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_create_bucket]
+from google.cloud import storage
+
+
+def create_bucket(bucket_name):
+ """Creates a new bucket."""
+ # bucket_name = "your-new-bucket-name"
+
+ storage_client = storage.Client()
+
+ bucket = storage_client.create_bucket(bucket_name)
+
+ print(f"Bucket {bucket.name} created")
+
+
+# [END storage_create_bucket]
+
+if __name__ == "__main__":
+ create_bucket(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_create_bucket_class_location.py b/storage/samples/snippets/storage_create_bucket_class_location.py
new file mode 100644
index 00000000000..51fa864405d
--- /dev/null
+++ b/storage/samples/snippets/storage_create_bucket_class_location.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_create_bucket_class_location]
+from google.cloud import storage
+
+
+def create_bucket_class_location(bucket_name):
+    """
+    Create a new bucket in the US region with the coldline storage
+    class.
+
+    Args:
+        bucket_name: globally-unique name for the new bucket.
+
+    Returns:
+        The newly created Bucket object.
+    """
+    # bucket_name = "your-new-bucket-name"
+
+    storage_client = storage.Client()
+
+    # Configure the local Bucket object first; nothing is sent to the service
+    # until create_bucket() is called.
+    bucket = storage_client.bucket(bucket_name)
+    bucket.storage_class = "COLDLINE"
+    new_bucket = storage_client.create_bucket(bucket, location="us")
+
+    print(
+        "Created bucket {} in {} with storage class {}".format(
+            new_bucket.name, new_bucket.location, new_bucket.storage_class
+        )
+    )
+    return new_bucket
+
+
+# [END storage_create_bucket_class_location]
+
+if __name__ == "__main__":
+    create_bucket_class_location(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_create_bucket_dual_region.py b/storage/samples/snippets/storage_create_bucket_dual_region.py
new file mode 100644
index 00000000000..c5a78fa0f9b
--- /dev/null
+++ b/storage/samples/snippets/storage_create_bucket_dual_region.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+
+# Copyright 2022 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+"""
+Sample that creates a dual region bucket.
+"""
+
+# [START storage_create_bucket_dual_region]
+from google.cloud import storage
+
+
+def create_bucket_dual_region(bucket_name, location, region_1, region_2):
+    """Creates a Dual-Region Bucket with provided location and regions.
+
+    Args:
+        bucket_name: globally-unique name for the new bucket.
+        location: top-level location code containing both regions (e.g. "US").
+        region_1: first region of the pair. Case-insensitive.
+        region_2: second region of the pair. Case-insensitive.
+    """
+    # The ID of your GCS bucket
+    # bucket_name = "your-bucket-name"
+
+    # The bucket's pair of regions. Case-insensitive.
+    # See this documentation for other valid locations:
+    # https://cloud.google.com/storage/docs/locations
+    # region_1 = "US-EAST1"
+    # region_2 = "US-WEST1"
+    # location = "US"
+
+    storage_client = storage.Client()
+    bucket = storage_client.create_bucket(bucket_name, location=location, data_locations=[region_1, region_2])
+
+    print(f"Created bucket {bucket_name}")
+    print(f" - location: {bucket.location}")
+    print(f" - location_type: {bucket.location_type}")
+    print(f" - customPlacementConfig data_locations: {bucket.data_locations}")
+
+
+# [END storage_create_bucket_dual_region]
+
+
+if __name__ == "__main__":
+    # CLI: python <script> <bucket_name> <location> <region_1> <region_2>
+    create_bucket_dual_region(
+        bucket_name=sys.argv[1], location=sys.argv[2], region_1=sys.argv[3], region_2=sys.argv[4]
+    )
diff --git a/storage/samples/snippets/storage_create_bucket_hierarchical_namespace.py b/storage/samples/snippets/storage_create_bucket_hierarchical_namespace.py
new file mode 100644
index 00000000000..d9d31077251
--- /dev/null
+++ b/storage/samples/snippets/storage_create_bucket_hierarchical_namespace.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_create_bucket_hierarchical_namespace]
+from google.cloud import storage
+
+
+def create_bucket_hierarchical_namespace(bucket_name):
+    """Creates a bucket with hierarchical namespace enabled.
+
+    Args:
+        bucket_name: globally-unique name for the new bucket.
+    """
+    # The ID of your GCS bucket
+    # bucket_name = "your-bucket-name"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+    # NOTE(review): uniform bucket-level access is enabled alongside the
+    # hierarchical namespace flag — presumably a service requirement; confirm
+    # against the GCS hierarchical-namespace documentation.
+    bucket.iam_configuration.uniform_bucket_level_access_enabled = True
+    bucket.hierarchical_namespace_enabled = True
+    bucket.create()
+
+    print(f"Created bucket {bucket_name} with hierarchical namespace enabled.")
+
+
+# [END storage_create_bucket_hierarchical_namespace]
+
+
+if __name__ == "__main__":
+    create_bucket_hierarchical_namespace(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_create_bucket_notifications.py b/storage/samples/snippets/storage_create_bucket_notifications.py
new file mode 100644
index 00000000000..a6f218c36fa
--- /dev/null
+++ b/storage/samples/snippets/storage_create_bucket_notifications.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+"""Sample that creates a notification configuration for a bucket.
+This sample is used on this page:
+ https://cloud.google.com/storage/docs/reporting-changes
+For more information, see README.md.
+"""
+
+# [START storage_create_bucket_notifications]
+from google.cloud import storage
+
+
+def create_bucket_notifications(bucket_name, topic_name):
+    """Creates a notification configuration for a bucket.
+
+    Args:
+        bucket_name: name of the bucket to watch.
+        topic_name: name of the Pub/Sub topic that receives events.
+    """
+    # The ID of your GCS bucket
+    # bucket_name = "your-bucket-name"
+    # The name of a topic
+    # topic_name = "your-topic-name"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+    # Default settings: all event types, topic in the client's project.
+    notification = bucket.notification(topic_name=topic_name)
+    notification.create()
+
+    print(f"Successfully created notification with ID {notification.notification_id} for bucket {bucket_name}")
+
+# [END storage_create_bucket_notifications]
+
+
+if __name__ == "__main__":
+    create_bucket_notifications(bucket_name=sys.argv[1], topic_name=sys.argv[2])
diff --git a/storage/samples/snippets/storage_create_bucket_object_retention.py b/storage/samples/snippets/storage_create_bucket_object_retention.py
new file mode 100644
index 00000000000..4ebc32c0a25
--- /dev/null
+++ b/storage/samples/snippets/storage_create_bucket_object_retention.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_create_bucket_with_object_retention]
+from google.cloud import storage
+
+
+def create_bucket_object_retention(bucket_name):
+    """Creates a bucket with object retention enabled.
+
+    Args:
+        bucket_name: globally-unique name for the new bucket.
+    """
+    # The ID of your GCS bucket
+    # bucket_name = "your-bucket-name"
+
+    storage_client = storage.Client()
+    # enable_object_retention must be set at creation time; the resulting
+    # mode is echoed back on the bucket resource.
+    bucket = storage_client.create_bucket(bucket_name, enable_object_retention=True)
+
+    print(f"Created bucket {bucket_name} with object retention enabled setting: {bucket.object_retention_mode}")
+
+
+# [END storage_create_bucket_with_object_retention]
+
+
+if __name__ == "__main__":
+    create_bucket_object_retention(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_create_bucket_turbo_replication.py b/storage/samples/snippets/storage_create_bucket_turbo_replication.py
new file mode 100644
index 00000000000..bc05597958f
--- /dev/null
+++ b/storage/samples/snippets/storage_create_bucket_turbo_replication.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+"""Sample that creates a new bucket with dual-region and turbo replication.
+This sample is used on this page:
+ https://cloud.google.com/storage/docs/managing-turbo-replication
+For more information, see README.md.
+"""
+
+# [START storage_create_bucket_turbo_replication]
+
+from google.cloud import storage
+from google.cloud.storage.constants import RPO_ASYNC_TURBO
+
+
+def create_bucket_turbo_replication(bucket_name):
+    """Creates dual-region bucket with turbo replication enabled.
+
+    Args:
+        bucket_name: globally-unique name for the new bucket.
+    """
+    # The ID of your GCS bucket
+    # bucket_name = "my-bucket"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+    # NOTE(review): "NAM4" looks like a predefined dual-region location code;
+    # turbo replication (RPO_ASYNC_TURBO) presumably requires a dual-region
+    # bucket — confirm against the GCS turbo-replication docs.
+    bucket_location = "NAM4"
+    bucket.rpo = RPO_ASYNC_TURBO
+    bucket.create(location=bucket_location)
+
+    print(f"{bucket.name} created with the recovery point objective (RPO) set to {bucket.rpo} in {bucket.location}.")
+
+
+# [END storage_create_bucket_turbo_replication]
+
+if __name__ == "__main__":
+    create_bucket_turbo_replication(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_create_hmac_key.py b/storage/samples/snippets/storage_create_hmac_key.py
new file mode 100644
index 00000000000..d845738b780
--- /dev/null
+++ b/storage/samples/snippets/storage_create_hmac_key.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_create_hmac_key]
+from google.cloud import storage
+
+
+def create_key(project_id, service_account_email):
+    """
+    Create a new HMAC key using the given project and service account.
+
+    Args:
+        project_id: Google Cloud project that owns the key.
+        service_account_email: service account the key authenticates as.
+
+    Returns:
+        The HMAC key metadata object (the secret itself is only printed;
+        it cannot be retrieved again after this call).
+    """
+    # project_id = 'Your Google Cloud project ID'
+    # service_account_email = 'Service account used to generate the HMAC key'
+
+    storage_client = storage.Client(project=project_id)
+
+    # create_hmac_key returns the metadata AND the one-time base64 secret.
+    hmac_key, secret = storage_client.create_hmac_key(
+        service_account_email=service_account_email, project_id=project_id
+    )
+
+    print(f"The base64 encoded secret is {secret}")
+    print("Do not miss that secret, there is no API to recover it.")
+    print("The HMAC key metadata is:")
+    print(f"Service Account Email: {hmac_key.service_account_email}")
+    print(f"Key ID: {hmac_key.id}")
+    print(f"Access ID: {hmac_key.access_id}")
+    print(f"Project ID: {hmac_key.project}")
+    print(f"State: {hmac_key.state}")
+    print(f"Created At: {hmac_key.time_created}")
+    print(f"Updated At: {hmac_key.updated}")
+    print(f"Etag: {hmac_key.etag}")
+    return hmac_key
+
+
+# [END storage_create_hmac_key]
+
+if __name__ == "__main__":
+    create_key(project_id=sys.argv[1], service_account_email=sys.argv[2])
diff --git a/storage/samples/snippets/storage_deactivate_hmac_key.py b/storage/samples/snippets/storage_deactivate_hmac_key.py
new file mode 100644
index 00000000000..007f7b5a5f2
--- /dev/null
+++ b/storage/samples/snippets/storage_deactivate_hmac_key.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_deactivate_hmac_key]
+from google.cloud import storage
+
+
+def deactivate_key(access_id, project_id):
+    """
+    Deactivate the HMAC key with the given access ID.
+
+    Args:
+        access_id: ID of an active HMAC key.
+        project_id: Google Cloud project that owns the key.
+
+    Returns:
+        The updated HMAC key metadata object (state "INACTIVE").
+    """
+    # project_id = "Your Google Cloud project ID"
+    # access_id = "ID of an active HMAC key"
+
+    storage_client = storage.Client(project=project_id)
+
+    hmac_key = storage_client.get_hmac_key_metadata(
+        access_id, project_id=project_id
+    )
+    # Deactivation is done by flipping state and pushing the update.
+    hmac_key.state = "INACTIVE"
+    hmac_key.update()
+
+    print("The HMAC key is now inactive.")
+    print("The HMAC key metadata is:")
+    print(f"Service Account Email: {hmac_key.service_account_email}")
+    print(f"Key ID: {hmac_key.id}")
+    print(f"Access ID: {hmac_key.access_id}")
+    print(f"Project ID: {hmac_key.project}")
+    print(f"State: {hmac_key.state}")
+    print(f"Created At: {hmac_key.time_created}")
+    print(f"Updated At: {hmac_key.updated}")
+    print(f"Etag: {hmac_key.etag}")
+    return hmac_key
+
+
+# [END storage_deactivate_hmac_key]
+
+if __name__ == "__main__":
+    deactivate_key(access_id=sys.argv[1], project_id=sys.argv[2])
diff --git a/storage/samples/snippets/storage_define_bucket_website_configuration.py b/storage/samples/snippets/storage_define_bucket_website_configuration.py
new file mode 100644
index 00000000000..ce6c7e66cdb
--- /dev/null
+++ b/storage/samples/snippets/storage_define_bucket_website_configuration.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_define_bucket_website_configuration]
+from google.cloud import storage
+
+
+def define_bucket_website_configuration(bucket_name, main_page_suffix, not_found_page):
+    """Configure website-related properties of bucket.
+
+    Args:
+        bucket_name: name of the bucket serving the static site.
+        main_page_suffix: object name served as the index page (e.g. "index.html").
+        not_found_page: object name served for 404s (e.g. "404.html").
+
+    Returns:
+        The patched Bucket object.
+    """
+    # bucket_name = "your-bucket-name"
+    # main_page_suffix = "index.html"
+    # not_found_page = "404.html"
+
+    storage_client = storage.Client()
+
+    bucket = storage_client.get_bucket(bucket_name)
+    # configure_website only mutates the local object; patch() persists it.
+    bucket.configure_website(main_page_suffix, not_found_page)
+    bucket.patch()
+
+    print(
+        "Static website bucket {} is set up to use {} as the index page and {} as the 404 page".format(
+            bucket.name, main_page_suffix, not_found_page
+        )
+    )
+    return bucket
+
+
+# [END storage_define_bucket_website_configuration]
+
+if __name__ == "__main__":
+    define_bucket_website_configuration(
+        bucket_name=sys.argv[1],
+        main_page_suffix=sys.argv[2],
+        not_found_page=sys.argv[3],
+    )
diff --git a/storage/samples/snippets/storage_delete_bucket.py b/storage/samples/snippets/storage_delete_bucket.py
new file mode 100644
index 00000000000..b12c066361d
--- /dev/null
+++ b/storage/samples/snippets/storage_delete_bucket.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_delete_bucket]
+from google.cloud import storage
+
+
+def delete_bucket(bucket_name):
+ """Deletes a bucket. The bucket must be empty."""
+ # bucket_name = "your-bucket-name"
+
+ storage_client = storage.Client()
+
+ bucket = storage_client.get_bucket(bucket_name)
+ bucket.delete()
+
+ print(f"Bucket {bucket.name} deleted")
+
+
+# [END storage_delete_bucket]
+
+if __name__ == "__main__":
+ delete_bucket(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_delete_bucket_notification.py b/storage/samples/snippets/storage_delete_bucket_notification.py
new file mode 100644
index 00000000000..efd41771d60
--- /dev/null
+++ b/storage/samples/snippets/storage_delete_bucket_notification.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+"""Sample that deletes a notification configuration for a bucket.
+This sample is used on this page:
+ https://cloud.google.com/storage/docs/reporting-changes
+For more information, see README.md.
+"""
+
+# [START storage_delete_bucket_notification]
+from google.cloud import storage
+
+
+def delete_bucket_notification(bucket_name, notification_id):
+    """Deletes a notification configuration for a bucket.
+
+    Args:
+        bucket_name: name of the bucket holding the notification.
+        notification_id: ID of the notification configuration to delete.
+    """
+    # The ID of your GCS bucket
+    # bucket_name = "your-bucket-name"
+    # The ID of the notification
+    # notification_id = "your-notification-id"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+    notification = bucket.notification(notification_id=notification_id)
+    notification.delete()
+
+    print(f"Successfully deleted notification with ID {notification_id} for bucket {bucket_name}")
+
+# [END storage_delete_bucket_notification]
+
+
+if __name__ == "__main__":
+    delete_bucket_notification(bucket_name=sys.argv[1], notification_id=sys.argv[2])
diff --git a/storage/samples/snippets/storage_delete_file.py b/storage/samples/snippets/storage_delete_file.py
new file mode 100644
index 00000000000..427604145dd
--- /dev/null
+++ b/storage/samples/snippets/storage_delete_file.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_delete_file]
+from google.cloud import storage
+
+
+def delete_blob(bucket_name, blob_name):
+ """Deletes a blob from the bucket."""
+ # bucket_name = "your-bucket-name"
+ # blob_name = "your-object-name"
+
+ storage_client = storage.Client()
+
+ bucket = storage_client.bucket(bucket_name)
+ blob = bucket.blob(blob_name)
+ generation_match_precondition = None
+
+ # Optional: set a generation-match precondition to avoid potential race conditions
+ # and data corruptions. The request to delete is aborted if the object's
+ # generation number does not match your precondition.
+ blob.reload() # Fetch blob metadata to use in generation_match_precondition.
+ generation_match_precondition = blob.generation
+
+ blob.delete(if_generation_match=generation_match_precondition)
+
+ print(f"Blob {blob_name} deleted.")
+
+
+# [END storage_delete_file]
+
+if __name__ == "__main__":
+ delete_blob(bucket_name=sys.argv[1], blob_name=sys.argv[2])
diff --git a/storage/samples/snippets/storage_delete_file_archived_generation.py b/storage/samples/snippets/storage_delete_file_archived_generation.py
new file mode 100644
index 00000000000..ff02bca23dc
--- /dev/null
+++ b/storage/samples/snippets/storage_delete_file_archived_generation.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_delete_file_archived_generation]
+from google.cloud import storage
+
+
+def delete_file_archived_generation(bucket_name, blob_name, generation):
+ """Delete a blob in the bucket with the given generation."""
+ # bucket_name = "your-bucket-name"
+ # blob_name = "your-object-name"
+ # generation = 1579287380533984
+
+ storage_client = storage.Client()
+
+ bucket = storage_client.get_bucket(bucket_name)
+ bucket.delete_blob(blob_name, generation=generation)
+ print(
+ f"Generation {generation} of blob {blob_name} was deleted from {bucket_name}"
+ )
+
+
+# [END storage_delete_file_archived_generation]
+
+
+if __name__ == "__main__":
+ delete_file_archived_generation(
+ bucket_name=sys.argv[1],
+ blob_name=sys.argv[2],
+ generation=sys.argv[3]
+ )
diff --git a/storage/samples/snippets/storage_delete_hmac_key.py b/storage/samples/snippets/storage_delete_hmac_key.py
new file mode 100644
index 00000000000..403dc193b22
--- /dev/null
+++ b/storage/samples/snippets/storage_delete_hmac_key.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_delete_hmac_key]
+from google.cloud import storage
+
+
def delete_key(access_id, project_id):
    """Delete the HMAC key identified by *access_id*.

    The key must already be in the INACTIVE state or the backend rejects
    the deletion.

    Args:
        access_id: ID of the HMAC key (must be in INACTIVE state).
        project_id: Google Cloud project that owns the key.
    """
    # project_id = "Your Google Cloud project ID"
    # access_id = "ID of an HMAC key (must be in INACTIVE state)"

    client = storage.Client(project=project_id)

    # Deletion happens through the key's metadata resource, so fetch it first.
    metadata = client.get_hmac_key_metadata(access_id, project_id=project_id)
    metadata.delete()

    print(
        "The key is deleted, though it may still appear in list_hmac_keys()"
        " results."
    )
+
+
+# [END storage_delete_hmac_key]
+
+if __name__ == "__main__":
+ delete_key(access_id=sys.argv[1], project_id=sys.argv[2])
diff --git a/storage/samples/snippets/storage_disable_bucket_lifecycle_management.py b/storage/samples/snippets/storage_disable_bucket_lifecycle_management.py
new file mode 100644
index 00000000000..a5fa56fcf35
--- /dev/null
+++ b/storage/samples/snippets/storage_disable_bucket_lifecycle_management.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_disable_bucket_lifecycle_management]
+from google.cloud import storage
+
+
def disable_bucket_lifecycle_management(bucket_name):
    """Disable lifecycle management for a bucket by clearing all its rules.

    Args:
        bucket_name: Name of the bucket to update.

    Returns:
        The updated ``Bucket`` object (with an empty rule list).
    """
    # bucket_name = "my-bucket"

    storage_client = storage.Client()

    bucket = storage_client.get_bucket(bucket_name)
    # Use the correctly spelled method; `clear_lifecyle_rules` (missing "c")
    # is a deprecated misspelled alias kept only for backward compatibility.
    bucket.clear_lifecycle_rules()
    bucket.patch()
    rules = bucket.lifecycle_rules

    print(f"Lifecycle management is disabled for bucket {bucket_name} and the rules are {list(rules)}")
    return bucket
+
+
+# [END storage_disable_bucket_lifecycle_management]
+
+if __name__ == "__main__":
+ disable_bucket_lifecycle_management(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_disable_default_event_based_hold.py b/storage/samples/snippets/storage_disable_default_event_based_hold.py
new file mode 100644
index 00000000000..48becdac1c0
--- /dev/null
+++ b/storage/samples/snippets/storage_disable_default_event_based_hold.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_disable_default_event_based_hold]
+from google.cloud import storage
+
+
def disable_default_event_based_hold(bucket_name):
    """Turn off the bucket-wide default event-based hold.

    Args:
        bucket_name: Name of the bucket to update.
    """
    # bucket_name = "my-bucket"

    client = storage.Client()
    bucket = client.get_bucket(bucket_name)

    # Flip the flag on the local object, then persist it with a PATCH request.
    bucket.default_event_based_hold = False
    bucket.patch()

    print(f"Default event based hold was disabled for {bucket_name}")
+
+
+# [END storage_disable_default_event_based_hold]
+
+
+if __name__ == "__main__":
+ disable_default_event_based_hold(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_disable_requester_pays.py b/storage/samples/snippets/storage_disable_requester_pays.py
new file mode 100644
index 00000000000..78e195d8a4a
--- /dev/null
+++ b/storage/samples/snippets/storage_disable_requester_pays.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_disable_requester_pays]
+from google.cloud import storage
+
+
def disable_requester_pays(bucket_name):
    """Turn off the Requester Pays setting on a bucket.

    Args:
        bucket_name: Name of the bucket to update.
    """
    # bucket_name = "my-bucket"

    client = storage.Client()
    bucket = client.get_bucket(bucket_name)

    # Update the flag locally, then persist the change with a PATCH request.
    bucket.requester_pays = False
    bucket.patch()

    print(f"Requester Pays has been disabled for {bucket_name}")
+
+
+# [END storage_disable_requester_pays]
+
+
+if __name__ == "__main__":
+ disable_requester_pays(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_disable_soft_delete.py b/storage/samples/snippets/storage_disable_soft_delete.py
new file mode 100644
index 00000000000..dc2447ae873
--- /dev/null
+++ b/storage/samples/snippets/storage_disable_soft_delete.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_disable_soft_delete]
+from google.cloud import storage
+
+
def disable_soft_delete(bucket_name):
    """Disable the soft-delete policy on a bucket.

    Args:
        bucket_name: Name of the bucket to update.
    """
    # bucket_name = "your-bucket-name"

    client = storage.Client()
    bucket = client.get_bucket(bucket_name)

    # A retention duration of zero seconds turns soft-delete off entirely.
    bucket.soft_delete_policy.retention_duration_seconds = 0
    bucket.patch()

    print(f"Soft-delete policy is disabled for bucket {bucket_name}")
+
+
+# [END storage_disable_soft_delete]
+
+if __name__ == "__main__":
+ disable_soft_delete(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_disable_uniform_bucket_level_access.py b/storage/samples/snippets/storage_disable_uniform_bucket_level_access.py
new file mode 100644
index 00000000000..20a045686c3
--- /dev/null
+++ b/storage/samples/snippets/storage_disable_uniform_bucket_level_access.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_disable_uniform_bucket_level_access]
+from google.cloud import storage
+
+
def disable_uniform_bucket_level_access(bucket_name):
    """Turn off uniform bucket-level access for a bucket.

    Args:
        bucket_name: Name of the bucket to update.
    """
    # bucket_name = "my-bucket"

    client = storage.Client()
    bucket = client.get_bucket(bucket_name)

    # The setting lives on the bucket's IAM configuration sub-resource.
    bucket.iam_configuration.uniform_bucket_level_access_enabled = False
    bucket.patch()

    print(f"Uniform bucket-level access was disabled for {bucket.name}.")
+
+
+# [END storage_disable_uniform_bucket_level_access]
+
+if __name__ == "__main__":
+ disable_uniform_bucket_level_access(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_disable_versioning.py b/storage/samples/snippets/storage_disable_versioning.py
new file mode 100644
index 00000000000..9dfd0ff909a
--- /dev/null
+++ b/storage/samples/snippets/storage_disable_versioning.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_disable_versioning]
+from google.cloud import storage
+
+
def disable_versioning(bucket_name):
    """Disable object versioning for a bucket.

    Args:
        bucket_name: Name of the bucket to update.

    Returns:
        The updated ``Bucket`` object.
    """
    # bucket_name = "my-bucket"

    storage_client = storage.Client()

    bucket = storage_client.get_bucket(bucket_name)
    bucket.versioning_enabled = False
    bucket.patch()

    # Print the bucket's name (not the Bucket repr) so the output matches
    # the companion enable_versioning sample.
    print(f"Versioning was disabled for bucket {bucket.name}")
    return bucket
+
+
+# [END storage_disable_versioning]
+
+if __name__ == "__main__":
+ disable_versioning(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_download_byte_range.py b/storage/samples/snippets/storage_download_byte_range.py
new file mode 100644
index 00000000000..e6143a04f46
--- /dev/null
+++ b/storage/samples/snippets/storage_download_byte_range.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_download_byte_range]
+from google.cloud import storage
+
+
def download_byte_range(
    bucket_name, source_blob_name, start_byte, end_byte, destination_file_name
):
    """Download only the bytes ``start_byte``..``end_byte`` of an object.

    Args:
        bucket_name: The ID of the GCS bucket.
        source_blob_name: The ID of the GCS object.
        start_byte: The starting byte at which to begin the download.
        end_byte: The ending byte at which to end the download.
        destination_file_name: Local path the partial content is written to.
    """
    # bucket_name = "your-bucket-name"
    # source_blob_name = "storage-object-name"
    # start_byte = 0
    # end_byte = 20
    # destination_file_name = "local/path/to/file"

    client = storage.Client()
    bucket = client.bucket(bucket_name)

    # `Bucket.blob` builds a client-side stub without fetching any metadata
    # from Google Cloud Storage, which is all a ranged download needs
    # (unlike `Bucket.get_blob`, which performs a round-trip).
    blob = bucket.blob(source_blob_name)
    blob.download_to_filename(destination_file_name, start=start_byte, end=end_byte)

    print(
        "Downloaded bytes {} to {} of object {} from bucket {} to local file {}.".format(
            start_byte, end_byte, source_blob_name, bucket_name, destination_file_name
        )
    )
+
+
+# [END storage_download_byte_range]
+
if __name__ == "__main__":
    # Byte offsets arrive as strings on the command line, but the ranged
    # download API (`start=`/`end=`) expects integers — convert explicitly.
    download_byte_range(
        bucket_name=sys.argv[1],
        source_blob_name=sys.argv[2],
        start_byte=int(sys.argv[3]),
        end_byte=int(sys.argv[4]),
        destination_file_name=sys.argv[5],
    )
diff --git a/storage/samples/snippets/storage_download_encrypted_file.py b/storage/samples/snippets/storage_download_encrypted_file.py
new file mode 100644
index 00000000000..8a81b0de597
--- /dev/null
+++ b/storage/samples/snippets/storage_download_encrypted_file.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START storage_download_encrypted_file]
+import base64
+# [END storage_download_encrypted_file]
+import sys
+# [START storage_download_encrypted_file]
+
+from google.cloud import storage
+
+
def download_encrypted_blob(
    bucket_name,
    source_blob_name,
    destination_file_name,
    base64_encryption_key,
):
    """Download a blob that was uploaded with a customer-supplied key.

    The encryption key provided must be the same key provided when
    uploading the blob.

    Args:
        bucket_name: Name of the bucket containing the object.
        source_blob_name: Name of the encrypted object.
        destination_file_name: Local path the decrypted content is written to.
        base64_encryption_key: Base64-encoded AES256 key used at upload time.
    """
    # bucket_name = "your-bucket-name"
    # source_blob_name = "storage-object-name"
    # destination_file_name = "local/path/to/file"
    # base64_encryption_key = "base64-encoded-encryption-key"

    client = storage.Client()
    bucket = client.bucket(bucket_name)

    # The API wants the raw 32-byte AES256 key, so decode the base64 form
    # the caller passed in.
    raw_key = base64.b64decode(base64_encryption_key)

    blob = bucket.blob(source_blob_name, encryption_key=raw_key)
    blob.download_to_filename(destination_file_name)

    print(
        f"Blob {source_blob_name} downloaded to {destination_file_name}."
    )
+
+
+# [END storage_download_encrypted_file]
+
+if __name__ == "__main__":
+ download_encrypted_blob(
+ bucket_name=sys.argv[1],
+ source_blob_name=sys.argv[2],
+ destination_file_name=sys.argv[3],
+ base64_encryption_key=sys.argv[4],
+ )
diff --git a/storage/samples/snippets/storage_download_file.py b/storage/samples/snippets/storage_download_file.py
new file mode 100644
index 00000000000..f8a1c93c83c
--- /dev/null
+++ b/storage/samples/snippets/storage_download_file.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_download_file]
+from google.cloud import storage
+
+
def download_blob(bucket_name, source_blob_name, destination_file_name):
    """Download an object from a bucket to a local file.

    Args:
        bucket_name: The ID of the GCS bucket.
        source_blob_name: The ID of the GCS object.
        destination_file_name: The path to which the file is downloaded.
    """
    # bucket_name = "your-bucket-name"
    # source_blob_name = "storage-object-name"
    # destination_file_name = "local/path/to/file"

    client = storage.Client()
    bucket = client.bucket(bucket_name)

    # `Bucket.blob` creates a lightweight client-side handle without
    # retrieving any content or metadata from Google Cloud Storage;
    # since no extra data is needed, it is preferred over `Bucket.get_blob`.
    blob = bucket.blob(source_blob_name)
    blob.download_to_filename(destination_file_name)

    print(
        "Downloaded storage object {} from bucket {} to local file {}.".format(
            source_blob_name, bucket_name, destination_file_name
        )
    )
+
+
+# [END storage_download_file]
+
+if __name__ == "__main__":
+ download_blob(
+ bucket_name=sys.argv[1],
+ source_blob_name=sys.argv[2],
+ destination_file_name=sys.argv[3],
+ )
diff --git a/storage/samples/snippets/storage_download_file_requester_pays.py b/storage/samples/snippets/storage_download_file_requester_pays.py
new file mode 100644
index 00000000000..babbafda7c2
--- /dev/null
+++ b/storage/samples/snippets/storage_download_file_requester_pays.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_download_file_requester_pays]
+from google.cloud import storage
+
+
def download_file_requester_pays(
    bucket_name, project_id, source_blob_name, destination_file_name
):
    """Download a file, billing the request to the specified project.

    Args:
        bucket_name: Name of the Requester Pays bucket.
        project_id: Project billed for the download.
        source_blob_name: Name of the object to download.
        destination_file_name: Local path the content is written to.
    """
    # bucket_name = "your-bucket-name"
    # project_id = "your-project-id"
    # source_blob_name = "source-blob-name"
    # destination_file_name = "local-destination-file-name"

    client = storage.Client()

    # `user_project` marks this project as the requester to be billed.
    bucket = client.bucket(bucket_name, user_project=project_id)
    blob = bucket.blob(source_blob_name)
    blob.download_to_filename(destination_file_name)

    print(
        "Blob {} downloaded to {} using a requester-pays request.".format(
            source_blob_name, destination_file_name
        )
    )
+
+
+# [END storage_download_file_requester_pays]
+
+if __name__ == "__main__":
+ download_file_requester_pays(
+ bucket_name=sys.argv[1],
+ project_id=sys.argv[2],
+ source_blob_name=sys.argv[3],
+ destination_file_name=sys.argv[4],
+ )
diff --git a/storage/samples/snippets/storage_download_into_memory.py b/storage/samples/snippets/storage_download_into_memory.py
new file mode 100644
index 00000000000..97f677054d5
--- /dev/null
+++ b/storage/samples/snippets/storage_download_into_memory.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_file_download_into_memory]
+from google.cloud import storage
+
+
def download_blob_into_memory(bucket_name, blob_name):
    """Download an object's contents into memory as bytes.

    Args:
        bucket_name: The ID of the GCS bucket.
        blob_name: The ID of the GCS object.
    """
    # bucket_name = "your-bucket-name"
    # blob_name = "storage-object-name"

    client = storage.Client()
    bucket = client.bucket(bucket_name)

    # `Bucket.blob` creates a local stub without contacting Google Cloud
    # Storage; no metadata round-trip is needed before the download, so it
    # is preferred over `Bucket.get_blob` here.
    blob = bucket.blob(blob_name)
    contents = blob.download_as_bytes()

    print(
        "Downloaded storage object {} from bucket {} as the following bytes object: {}.".format(
            blob_name, bucket_name, contents.decode("utf-8")
        )
    )
+
+
+# [END storage_file_download_into_memory]
+
+if __name__ == "__main__":
+ download_blob_into_memory(
+ bucket_name=sys.argv[1],
+ blob_name=sys.argv[2],
+ )
diff --git a/storage/samples/snippets/storage_download_public_file.py b/storage/samples/snippets/storage_download_public_file.py
new file mode 100644
index 00000000000..8fbb68405af
--- /dev/null
+++ b/storage/samples/snippets/storage_download_public_file.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_download_public_file]
+from google.cloud import storage
+
+
def download_public_file(bucket_name, source_blob_name, destination_file_name):
    """Download a publicly readable blob without any credentials.

    Args:
        bucket_name: Name of the bucket containing the public object.
        source_blob_name: Name of the public object.
        destination_file_name: Local path the content is written to.
    """
    # bucket_name = "your-bucket-name"
    # source_blob_name = "storage-object-name"
    # destination_file_name = "local/path/to/file"

    # An anonymous client sends unauthenticated requests, which is enough
    # for objects that are publicly readable.
    client = storage.Client.create_anonymous_client()

    blob = client.bucket(bucket_name).blob(source_blob_name)
    blob.download_to_filename(destination_file_name)

    print(
        "Downloaded public blob {} from bucket {} to {}.".format(
            source_blob_name, blob.bucket.name, destination_file_name
        )
    )
+
+
+# [END storage_download_public_file]
+
+if __name__ == "__main__":
+ download_public_file(
+ bucket_name=sys.argv[1],
+ source_blob_name=sys.argv[2],
+ destination_file_name=sys.argv[3],
+ )
diff --git a/storage/samples/snippets/storage_download_to_stream.py b/storage/samples/snippets/storage_download_to_stream.py
new file mode 100644
index 00000000000..3834e34c917
--- /dev/null
+++ b/storage/samples/snippets/storage_download_to_stream.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START storage_stream_file_download]
+from google.cloud import storage
+
+
def download_blob_to_stream(bucket_name, source_blob_name, file_obj):
    """Write the contents of a blob into a stream or file-like object.

    Args:
        bucket_name: The ID of the GCS bucket.
        source_blob_name: The ID of the GCS object (blob).
        file_obj: The stream or file-like object the blob is written to,
            e.g. ``io.BytesIO()``.

    Returns:
        The same ``file_obj``, positioned after the written data.
    """
    client = storage.Client()
    bucket = client.bucket(bucket_name)

    # `Bucket.blob` makes a client-side handle without a metadata round-trip
    # (unlike `Bucket.get_blob`); metadata is not needed for a plain download.
    blob = bucket.blob(source_blob_name)
    blob.download_to_file(file_obj)

    print(f"Downloaded blob {source_blob_name} to file-like object.")

    # Callers should rewind with file_obj.seek(0) before reading it back.
    return file_obj
+
+# [END storage_stream_file_download]
diff --git a/storage/samples/snippets/storage_enable_bucket_lifecycle_management.py b/storage/samples/snippets/storage_enable_bucket_lifecycle_management.py
new file mode 100644
index 00000000000..0bbff079c8a
--- /dev/null
+++ b/storage/samples/snippets/storage_enable_bucket_lifecycle_management.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_enable_bucket_lifecycle_management]
+from google.cloud import storage
+
+
def enable_bucket_lifecycle_management(bucket_name, age=2):
    """Enable lifecycle management for a bucket by adding a delete rule.

    Args:
        bucket_name: Name of the bucket to update.
        age: Object age in days after which objects are deleted
            (defaults to 2, matching the original sample).

    Returns:
        The updated ``Bucket`` object.
    """
    # bucket_name = "my-bucket"

    storage_client = storage.Client()

    bucket = storage_client.get_bucket(bucket_name)
    rules = bucket.lifecycle_rules

    print(f"Lifecycle management rules for bucket {bucket_name} are {list(rules)}")
    bucket.add_lifecycle_delete_rule(age=age)
    bucket.patch()

    rules = bucket.lifecycle_rules
    # "enabled", not "enable": fixes the grammar of the original message.
    print(f"Lifecycle management is enabled for bucket {bucket_name} and the rules are {list(rules)}")

    return bucket
+
+
+# [END storage_enable_bucket_lifecycle_management]
+
+if __name__ == "__main__":
+ enable_bucket_lifecycle_management(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_enable_default_event_based_hold.py b/storage/samples/snippets/storage_enable_default_event_based_hold.py
new file mode 100644
index 00000000000..5dfdf94a983
--- /dev/null
+++ b/storage/samples/snippets/storage_enable_default_event_based_hold.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_enable_default_event_based_hold]
+from google.cloud import storage
+
+
def enable_default_event_based_hold(bucket_name):
    """Turn on the bucket-wide default event-based hold.

    Args:
        bucket_name: Name of the bucket to update.
    """
    # bucket_name = "my-bucket"

    client = storage.Client()

    # A lazy bucket handle is sufficient: PATCH only sends the changed field.
    bucket = client.bucket(bucket_name)
    bucket.default_event_based_hold = True
    bucket.patch()

    print(f"Default event based hold was enabled for {bucket_name}")
+
+
+# [END storage_enable_default_event_based_hold]
+
+
+if __name__ == "__main__":
+ enable_default_event_based_hold(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_enable_requester_pays.py b/storage/samples/snippets/storage_enable_requester_pays.py
new file mode 100644
index 00000000000..fbecb04f47c
--- /dev/null
+++ b/storage/samples/snippets/storage_enable_requester_pays.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_enable_requester_pays]
+from google.cloud import storage
+
+
def enable_requester_pays(bucket_name):
    """Turn on the Requester Pays setting for a bucket.

    Args:
        bucket_name: Name of the bucket to update.
    """
    # bucket_name = "my-bucket"

    client = storage.Client()
    bucket = client.get_bucket(bucket_name)

    # Set the flag locally, then persist it with a PATCH request.
    bucket.requester_pays = True
    bucket.patch()

    print(f"Requester Pays has been enabled for {bucket_name}")
+
+
+# [END storage_enable_requester_pays]
+
+if __name__ == "__main__":
+ enable_requester_pays(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_enable_uniform_bucket_level_access.py b/storage/samples/snippets/storage_enable_uniform_bucket_level_access.py
new file mode 100644
index 00000000000..9ab71ae3730
--- /dev/null
+++ b/storage/samples/snippets/storage_enable_uniform_bucket_level_access.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_enable_uniform_bucket_level_access]
+from google.cloud import storage
+
+
def enable_uniform_bucket_level_access(bucket_name):
    """Turn on uniform bucket-level access for a bucket.

    Args:
        bucket_name: Name of the bucket to update.
    """
    # bucket_name = "my-bucket"

    client = storage.Client()
    bucket = client.get_bucket(bucket_name)

    # The setting lives on the bucket's IAM configuration sub-resource.
    bucket.iam_configuration.uniform_bucket_level_access_enabled = True
    bucket.patch()

    print(f"Uniform bucket-level access was enabled for {bucket.name}.")
+
+
+# [END storage_enable_uniform_bucket_level_access]
+
+if __name__ == "__main__":
+ enable_uniform_bucket_level_access(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_enable_versioning.py b/storage/samples/snippets/storage_enable_versioning.py
new file mode 100644
index 00000000000..9cdc980016e
--- /dev/null
+++ b/storage/samples/snippets/storage_enable_versioning.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_enable_versioning]
+from google.cloud import storage
+
+
+def enable_versioning(bucket_name):
+ """Enable versioning for this bucket."""
+ # bucket_name = "my-bucket"
+
+ storage_client = storage.Client()
+
+ bucket = storage_client.get_bucket(bucket_name)
+ bucket.versioning_enabled = True
+ bucket.patch()
+
+ print(f"Versioning was enabled for bucket {bucket.name}")
+ return bucket
+
+
+# [END storage_enable_versioning]
+
+if __name__ == "__main__":
+ enable_versioning(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_fileio_pandas.py b/storage/samples/snippets/storage_fileio_pandas.py
new file mode 100644
index 00000000000..d4d01edd784
--- /dev/null
+++ b/storage/samples/snippets/storage_fileio_pandas.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+"""Sample that creates and consumes a GCS blob using pandas with file-like IO
+"""
+
+# [START storage_fileio_pandas_write]
+
+
+def pandas_write(bucket_name, blob_name):
+ """Use pandas to interact with GCS using file-like IO"""
+ # The ID of your GCS bucket
+ # bucket_name = "your-bucket-name"
+
+ # The ID of your new GCS object
+ # blob_name = "storage-object-name"
+
+ from google.cloud import storage
+ import pandas as pd
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+ blob = bucket.blob(blob_name)
+
+ with blob.open("w") as f:
+ df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
+ f.write(df.to_csv(index=False))
+
+ print(f"Wrote csv with pandas with name {blob_name} from bucket {bucket.name}.")
+
+
+# [END storage_fileio_pandas_write]
+
+
+# [START storage_fileio_pandas_read]
+
+
+def pandas_read(bucket_name, blob_name):
+ """Use pandas to interact with GCS using file-like IO"""
+ # The ID of your GCS bucket
+ # bucket_name = "your-bucket-name"
+
+ # The ID of your new GCS object
+ # blob_name = "storage-object-name"
+
+ from google.cloud import storage
+ import pandas as pd
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+ blob = bucket.blob(blob_name)
+
+ with blob.open("r") as f:
+ pd.read_csv(f)
+
+ print(f"Read csv with pandas with name {blob_name} from bucket {bucket.name}.")
+
+
+# [END storage_fileio_pandas_read]
+
+
+if __name__ == "__main__":
+ pandas_write(
+ bucket_name=sys.argv[1],
+ blob_name=sys.argv[2]
+ )
+
+ pandas_read(
+ bucket_name=sys.argv[1],
+ blob_name=sys.argv[2]
+ )
diff --git a/storage/samples/snippets/storage_fileio_write_read.py b/storage/samples/snippets/storage_fileio_write_read.py
new file mode 100644
index 00000000000..5d35c84ab51
--- /dev/null
+++ b/storage/samples/snippets/storage_fileio_write_read.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+"""Sample that writes and read a blob in GCS using file-like IO
+"""
+
+# [START storage_fileio_write_read]
+from google.cloud import storage
+
+
+def write_read(bucket_name, blob_name):
+ """Write and read a blob from GCS using file-like IO"""
+ # The ID of your GCS bucket
+ # bucket_name = "your-bucket-name"
+
+ # The ID of your new GCS object
+ # blob_name = "storage-object-name"
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+ blob = bucket.blob(blob_name)
+
+ # Mode can be specified as wb/rb for bytes mode.
+ # See: https://docs.python.org/3/library/io.html
+ with blob.open("w") as f:
+ f.write("Hello world")
+
+ with blob.open("r") as f:
+ print(f.read())
+
+
+# [END storage_fileio_write_read]
+
+if __name__ == "__main__":
+ write_read(
+ bucket_name=sys.argv[1],
+ blob_name=sys.argv[2]
+ )
diff --git a/storage/samples/snippets/storage_generate_encryption_key.py b/storage/samples/snippets/storage_generate_encryption_key.py
new file mode 100644
index 00000000000..dbeb46b914b
--- /dev/null
+++ b/storage/samples/snippets/storage_generate_encryption_key.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START storage_generate_encryption_key]
+import base64
+import os
+
+
+def generate_encryption_key():
+ """Generates a 256 bit (32 byte) AES encryption key and prints the
+ base64 representation.
+
+ This is included for demonstration purposes. You should generate your own
+ key. Please remember that encryption keys should be handled with a
+ comprehensive security policy.
+ """
+ key = os.urandom(32)
+ encoded_key = base64.b64encode(key).decode("utf-8")
+
+ print(f"Base 64 encoded encryption key: {encoded_key}")
+
+
+# [END storage_generate_encryption_key]
+
+if __name__ == "__main__":
+ generate_encryption_key()
diff --git a/storage/samples/snippets/storage_generate_signed_post_policy_v4.py b/storage/samples/snippets/storage_generate_signed_post_policy_v4.py
new file mode 100644
index 00000000000..0c06ddc2fd4
--- /dev/null
+++ b/storage/samples/snippets/storage_generate_signed_post_policy_v4.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# [START storage_generate_signed_post_policy_v4]
+import datetime
+# [END storage_generate_signed_post_policy_v4]
+import sys
+# [START storage_generate_signed_post_policy_v4]
+
+from google.cloud import storage
+
+
+def generate_signed_post_policy_v4(bucket_name, blob_name):
+ """Generates a v4 POST Policy and prints an HTML form."""
+ # bucket_name = 'your-bucket-name'
+ # blob_name = 'your-object-name'
+
+ storage_client = storage.Client()
+
+ policy = storage_client.generate_signed_post_policy_v4(
+ bucket_name,
+ blob_name,
+ expiration=datetime.timedelta(minutes=10),
+ fields={
+ 'x-goog-meta-test': 'data'
+ }
+ )
+
+ # Create an HTML form with the provided policy
+    header = "<form action='{}' method='POST' enctype='multipart/form-data'>\n".format(
+        policy["url"]
+    )
+
+    # Include all fields returned in the policy in the HTML form as hidden inputs
+    fields = "".join(
+        "<input type='hidden' name='{}' value='{}'>\n".format(name, value)
+        for name, value in policy["fields"].items()
+    )
+
+    footer = (
+        "<input type='file' name='file'/><br />\n"
+        "<input type='submit' value='Upload File'/><br />\n"
+        "</form>"
+    )
+
+    form = header + fields + footer
+
+ print(form)
+
+ return form
+
+
+# [END storage_generate_signed_post_policy_v4]
+
+if __name__ == "__main__":
+ generate_signed_post_policy_v4(
+ bucket_name=sys.argv[1], blob_name=sys.argv[2]
+ )
diff --git a/storage/samples/snippets/storage_generate_signed_url_v2.py b/storage/samples/snippets/storage_generate_signed_url_v2.py
new file mode 100644
index 00000000000..9d34630f115
--- /dev/null
+++ b/storage/samples/snippets/storage_generate_signed_url_v2.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START storage_generate_signed_url_v2]
+import datetime
+# [END storage_generate_signed_url_v2]
+import sys
+# [START storage_generate_signed_url_v2]
+
+from google.cloud import storage
+
+
+def generate_signed_url(bucket_name, blob_name):
+ """Generates a v2 signed URL for downloading a blob.
+
+ Note that this method requires a service account key file.
+ """
+ # bucket_name = 'your-bucket-name'
+ # blob_name = 'your-object-name'
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+ blob = bucket.blob(blob_name)
+
+ url = blob.generate_signed_url(
+ # This URL is valid for 1 hour
+ expiration=datetime.timedelta(hours=1),
+ # Allow GET requests using this URL.
+ method="GET",
+ )
+
+ print(f"The signed url for {blob.name} is {url}")
+ return url
+
+
+# [END storage_generate_signed_url_v2]
+
+if __name__ == "__main__":
+ generate_signed_url(bucket_name=sys.argv[1], blob_name=sys.argv[2])
diff --git a/storage/samples/snippets/storage_generate_signed_url_v4.py b/storage/samples/snippets/storage_generate_signed_url_v4.py
new file mode 100644
index 00000000000..8825a7bb525
--- /dev/null
+++ b/storage/samples/snippets/storage_generate_signed_url_v4.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# [START storage_generate_signed_url_v4]
+import datetime
+# [END storage_generate_signed_url_v4]
+import sys
+# [START storage_generate_signed_url_v4]
+
+from google.cloud import storage
+
+
+def generate_download_signed_url_v4(bucket_name, blob_name):
+ """Generates a v4 signed URL for downloading a blob.
+
+ Note that this method requires a service account key file.
+ """
+ # bucket_name = 'your-bucket-name'
+ # blob_name = 'your-object-name'
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+ blob = bucket.blob(blob_name)
+
+ url = blob.generate_signed_url(
+ version="v4",
+ # This URL is valid for 15 minutes
+ expiration=datetime.timedelta(minutes=15),
+ # Allow GET requests using this URL.
+ method="GET",
+ )
+
+ print("Generated GET signed URL:")
+ print(url)
+ print("You can use this URL with any user agent, for example:")
+ print(f"curl '{url}'")
+ return url
+
+
+# [END storage_generate_signed_url_v4]
+
+if __name__ == "__main__":
+ generate_download_signed_url_v4(
+ bucket_name=sys.argv[1], blob_name=sys.argv[2]
+ )
diff --git a/storage/samples/snippets/storage_generate_upload_signed_url_v4.py b/storage/samples/snippets/storage_generate_upload_signed_url_v4.py
new file mode 100644
index 00000000000..b096fe59eb7
--- /dev/null
+++ b/storage/samples/snippets/storage_generate_upload_signed_url_v4.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# [START storage_generate_upload_signed_url_v4]
+import datetime
+# [END storage_generate_upload_signed_url_v4]
+import sys
+# [START storage_generate_upload_signed_url_v4]
+
+from google.cloud import storage
+
+
+def generate_upload_signed_url_v4(bucket_name, blob_name):
+ """Generates a v4 signed URL for uploading a blob using HTTP PUT.
+
+ Note that this method requires a service account key file.
+ """
+ # bucket_name = 'your-bucket-name'
+ # blob_name = 'your-object-name'
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+ blob = bucket.blob(blob_name)
+
+ url = blob.generate_signed_url(
+ version="v4",
+ # This URL is valid for 15 minutes
+ expiration=datetime.timedelta(minutes=15),
+ # Allow PUT requests using this URL.
+ method="PUT",
+ content_type="application/octet-stream",
+ )
+
+ print("Generated PUT signed URL:")
+ print(url)
+ print("You can use this URL with any user agent, for example:")
+ print(
+ "curl -X PUT -H 'Content-Type: application/octet-stream' "
+ "--upload-file my-file '{}'".format(url)
+ )
+ return url
+
+
+# [END storage_generate_upload_signed_url_v4]
+
+
+if __name__ == "__main__":
+ generate_upload_signed_url_v4(
+ bucket_name=sys.argv[1], blob_name=sys.argv[2]
+ )
diff --git a/storage/samples/snippets/storage_get_autoclass.py b/storage/samples/snippets/storage_get_autoclass.py
new file mode 100644
index 00000000000..30fa0c4f6b3
--- /dev/null
+++ b/storage/samples/snippets/storage_get_autoclass.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_get_autoclass]
+from google.cloud import storage
+
+
+def get_autoclass(bucket_name):
+ """Get the Autoclass setting for a bucket."""
+ # The ID of your GCS bucket
+ # bucket_name = "my-bucket"
+
+ storage_client = storage.Client()
+ bucket = storage_client.get_bucket(bucket_name)
+ autoclass_enabled = bucket.autoclass_enabled
+ autoclass_toggle_time = bucket.autoclass_toggle_time
+ terminal_storage_class = bucket.autoclass_terminal_storage_class
+ tsc_update_time = bucket.autoclass_terminal_storage_class_update_time
+
+ print(f"Autoclass enabled is set to {autoclass_enabled} for {bucket.name} at {autoclass_toggle_time}.")
+ print(f"Autoclass terminal storage class is set to {terminal_storage_class} for {bucket.name} at {tsc_update_time}.")
+
+ return bucket
+
+
+# [END storage_get_autoclass]
+
+if __name__ == "__main__":
+ get_autoclass(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_get_bucket_encryption_enforcement_config.py b/storage/samples/snippets/storage_get_bucket_encryption_enforcement_config.py
new file mode 100644
index 00000000000..033dcc8224c
--- /dev/null
+++ b/storage/samples/snippets/storage_get_bucket_encryption_enforcement_config.py
@@ -0,0 +1,48 @@
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START storage_get_bucket_encryption_enforcement_config]
+from google.cloud import storage
+
+
+def get_bucket_encryption_enforcement_config(bucket_name):
+ """Gets the bucket encryption enforcement configuration."""
+ # The ID of your GCS bucket
+ # bucket_name = "your-unique-bucket-name"
+
+ storage_client = storage.Client()
+ bucket = storage_client.get_bucket(bucket_name)
+
+ print(f"Encryption Enforcement Config for bucket {bucket.name}:")
+
+ cmek_config = bucket.encryption.customer_managed_encryption_enforcement_config
+ csek_config = bucket.encryption.customer_supplied_encryption_enforcement_config
+ gmek_config = bucket.encryption.google_managed_encryption_enforcement_config
+
+ print(
+ f"Customer-managed encryption enforcement config restriction mode: {cmek_config.restriction_mode if cmek_config else None}"
+ )
+ print(
+ f"Customer-supplied encryption enforcement config restriction mode: {csek_config.restriction_mode if csek_config else None}"
+ )
+ print(
+ f"Google-managed encryption enforcement config restriction mode: {gmek_config.restriction_mode if gmek_config else None}"
+ )
+
+
+# [END storage_get_bucket_encryption_enforcement_config]
+
+
+if __name__ == "__main__":
+ get_bucket_encryption_enforcement_config(bucket_name="your-unique-bucket-name")
diff --git a/storage/samples/snippets/storage_get_bucket_labels.py b/storage/samples/snippets/storage_get_bucket_labels.py
new file mode 100644
index 00000000000..b3bcd6208b8
--- /dev/null
+++ b/storage/samples/snippets/storage_get_bucket_labels.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# [START storage_get_bucket_labels]
+import pprint
+# [END storage_get_bucket_labels]
+import sys
+# [START storage_get_bucket_labels]
+
+from google.cloud import storage
+
+
+def get_bucket_labels(bucket_name):
+ """Prints out a bucket's labels."""
+ # bucket_name = 'your-bucket-name'
+ storage_client = storage.Client()
+
+ bucket = storage_client.get_bucket(bucket_name)
+
+ labels = bucket.labels
+ pprint.pprint(labels)
+
+
+# [END storage_get_bucket_labels]
+
+if __name__ == "__main__":
+ get_bucket_labels(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_get_bucket_metadata.py b/storage/samples/snippets/storage_get_bucket_metadata.py
new file mode 100644
index 00000000000..c86e154de10
--- /dev/null
+++ b/storage/samples/snippets/storage_get_bucket_metadata.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import sys
+
+# [START storage_get_bucket_metadata]
+
+from google.cloud import storage
+
+
+def bucket_metadata(bucket_name):
+ """Prints out a bucket's metadata."""
+ # bucket_name = 'your-bucket-name'
+
+ storage_client = storage.Client()
+ bucket = storage_client.get_bucket(bucket_name)
+
+ print(f"ID: {bucket.id}")
+ print(f"Name: {bucket.name}")
+ print(f"Storage Class: {bucket.storage_class}")
+ print(f"Location: {bucket.location}")
+ print(f"Location Type: {bucket.location_type}")
+ print(f"Cors: {bucket.cors}")
+ print(f"Default Event Based Hold: {bucket.default_event_based_hold}")
+ print(f"Default KMS Key Name: {bucket.default_kms_key_name}")
+ print(f"Metageneration: {bucket.metageneration}")
+ print(
+ f"Public Access Prevention: {bucket.iam_configuration.public_access_prevention}"
+ )
+ print(f"Retention Effective Time: {bucket.retention_policy_effective_time}")
+ print(f"Retention Period: {bucket.retention_period}")
+ print(f"Retention Policy Locked: {bucket.retention_policy_locked}")
+ print(f"Object Retention Mode: {bucket.object_retention_mode}")
+ print(f"Requester Pays: {bucket.requester_pays}")
+ print(f"Self Link: {bucket.self_link}")
+ print(f"Time Created: {bucket.time_created}")
+ print(f"Versioning Enabled: {bucket.versioning_enabled}")
+ print(f"Labels: {bucket.labels}")
+
+
+# [END storage_get_bucket_metadata]
+
+if __name__ == "__main__":
+ bucket_metadata(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_get_default_event_based_hold.py b/storage/samples/snippets/storage_get_default_event_based_hold.py
new file mode 100644
index 00000000000..08a05f8ef55
--- /dev/null
+++ b/storage/samples/snippets/storage_get_default_event_based_hold.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_get_default_event_based_hold]
+from google.cloud import storage
+
+
+def get_default_event_based_hold(bucket_name):
+ """Gets the default event based hold on a given bucket"""
+ # bucket_name = "my-bucket"
+
+ storage_client = storage.Client()
+
+ bucket = storage_client.get_bucket(bucket_name)
+
+ if bucket.default_event_based_hold:
+ print(f"Default event-based hold is enabled for {bucket_name}")
+ else:
+ print(
+ f"Default event-based hold is not enabled for {bucket_name}"
+ )
+
+
+# [END storage_get_default_event_based_hold]
+
+
+if __name__ == "__main__":
+ get_default_event_based_hold(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_get_hmac_key.py b/storage/samples/snippets/storage_get_hmac_key.py
new file mode 100644
index 00000000000..82b28ff99e4
--- /dev/null
+++ b/storage/samples/snippets/storage_get_hmac_key.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_get_hmac_key]
+from google.cloud import storage
+
+
+def get_key(access_id, project_id):
+ """
+ Retrieve the HMACKeyMetadata with the given access id.
+ """
+ # project_id = "Your Google Cloud project ID"
+ # access_id = "ID of an HMAC key"
+
+ storage_client = storage.Client(project=project_id)
+
+ hmac_key = storage_client.get_hmac_key_metadata(
+ access_id, project_id=project_id
+ )
+
+ print("The HMAC key metadata is:")
+ print(f"Service Account Email: {hmac_key.service_account_email}")
+ print(f"Key ID: {hmac_key.id}")
+ print(f"Access ID: {hmac_key.access_id}")
+ print(f"Project ID: {hmac_key.project}")
+ print(f"State: {hmac_key.state}")
+ print(f"Created At: {hmac_key.time_created}")
+ print(f"Updated At: {hmac_key.updated}")
+ print(f"Etag: {hmac_key.etag}")
+ return hmac_key
+
+
+# [END storage_get_hmac_key]
+
+if __name__ == "__main__":
+ get_key(access_id=sys.argv[1], project_id=sys.argv[2])
diff --git a/storage/samples/snippets/storage_get_metadata.py b/storage/samples/snippets/storage_get_metadata.py
new file mode 100644
index 00000000000..1e332b44565
--- /dev/null
+++ b/storage/samples/snippets/storage_get_metadata.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_get_metadata]
+from google.cloud import storage
+
+
+def blob_metadata(bucket_name, blob_name):
+ """Prints out a blob's metadata."""
+ # bucket_name = 'your-bucket-name'
+ # blob_name = 'your-object-name'
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+
+ # Retrieve a blob, and its metadata, from Google Cloud Storage.
+ # Note that `get_blob` differs from `Bucket.blob`, which does not
+ # make an HTTP request.
+ blob = bucket.get_blob(blob_name)
+
+ print(f"Blob: {blob.name}")
+ print(f"Blob finalization: {blob.finalized_time}")
+ print(f"Bucket: {blob.bucket.name}")
+ print(f"Storage class: {blob.storage_class}")
+ print(f"ID: {blob.id}")
+ print(f"Size: {blob.size} bytes")
+ print(f"Updated: {blob.updated}")
+ print(f"Generation: {blob.generation}")
+ print(f"Metageneration: {blob.metageneration}")
+ print(f"Etag: {blob.etag}")
+ print(f"Owner: {blob.owner}")
+ print(f"Component count: {blob.component_count}")
+ print(f"Crc32c: {blob.crc32c}")
+ print(f"md5_hash: {blob.md5_hash}")
+ print(f"Cache-control: {blob.cache_control}")
+ print(f"Content-type: {blob.content_type}")
+ print(f"Content-disposition: {blob.content_disposition}")
+ print(f"Content-encoding: {blob.content_encoding}")
+ print(f"Content-language: {blob.content_language}")
+ print(f"Metadata: {blob.metadata}")
+ print(f"Medialink: {blob.media_link}")
+ print(f"Custom Time: {blob.custom_time}")
+ print("Temporary hold: ", "enabled" if blob.temporary_hold else "disabled")
+ print(
+ "Event based hold: ",
+ "enabled" if blob.event_based_hold else "disabled",
+ )
+ print(f"Retention mode: {blob.retention.mode}")
+ print(f"Retention retain until time: {blob.retention.retain_until_time}")
+ if blob.retention_expiration_time:
+ print(
+ f"retentionExpirationTime: {blob.retention_expiration_time}"
+ )
+
+
+# [END storage_get_metadata]
+
+if __name__ == "__main__":
+ blob_metadata(bucket_name=sys.argv[1], blob_name=sys.argv[2])
diff --git a/storage/samples/snippets/storage_get_public_access_prevention.py b/storage/samples/snippets/storage_get_public_access_prevention.py
new file mode 100644
index 00000000000..275b84e3553
--- /dev/null
+++ b/storage/samples/snippets/storage_get_public_access_prevention.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_get_public_access_prevention]
+from google.cloud import storage
+
+
+def get_public_access_prevention(bucket_name):
+ """Gets the public access prevention setting (either 'inherited' or 'enforced') for a bucket."""
+ # The ID of your GCS bucket
+ # bucket_name = "my-bucket"
+
+ storage_client = storage.Client()
+ bucket = storage_client.get_bucket(bucket_name)
+ iam_configuration = bucket.iam_configuration
+
+ print(
+ f"Public access prevention is {iam_configuration.public_access_prevention} for {bucket.name}."
+ )
+
+
+# [END storage_get_public_access_prevention]
+
+if __name__ == "__main__":
+ get_public_access_prevention(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_get_requester_pays_status.py b/storage/samples/snippets/storage_get_requester_pays_status.py
new file mode 100644
index 00000000000..a2eeb34d70f
--- /dev/null
+++ b/storage/samples/snippets/storage_get_requester_pays_status.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_get_requester_pays_status]
+from google.cloud import storage
+
+
+def get_requester_pays_status(bucket_name):
+ """Get a bucket's requester pays metadata"""
+ # bucket_name = "my-bucket"
+ storage_client = storage.Client()
+
+ bucket = storage_client.get_bucket(bucket_name)
+ requester_pays_status = bucket.requester_pays
+
+ if requester_pays_status:
+ print(f"Requester Pays is enabled for {bucket_name}")
+ else:
+ print(f"Requester Pays is disabled for {bucket_name}")
+
+
+# [END storage_get_requester_pays_status]
+
+if __name__ == "__main__":
+ get_requester_pays_status(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_get_retention_policy.py b/storage/samples/snippets/storage_get_retention_policy.py
new file mode 100644
index 00000000000..215f80d5a59
--- /dev/null
+++ b/storage/samples/snippets/storage_get_retention_policy.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_get_retention_policy]
+from google.cloud import storage
+
+
+def get_retention_policy(bucket_name):
+ """Gets the retention policy on a given bucket"""
+ # bucket_name = "my-bucket"
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+ bucket.reload()
+
+ print(f"Retention Policy for {bucket_name}")
+ print(f"Retention Period: {bucket.retention_period}")
+ if bucket.retention_policy_locked:
+ print("Retention Policy is locked")
+
+ if bucket.retention_policy_effective_time:
+ print(
+ f"Effective Time: {bucket.retention_policy_effective_time}"
+ )
+
+
+# [END storage_get_retention_policy]
+
+
+if __name__ == "__main__":
+ get_retention_policy(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_get_rpo.py b/storage/samples/snippets/storage_get_rpo.py
new file mode 100644
index 00000000000..ab40ca3a5f4
--- /dev/null
+++ b/storage/samples/snippets/storage_get_rpo.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+"""Sample that gets RPO (Recovery Point Objective) of a bucket
+This sample is used on this page:
+ https://cloud.google.com/storage/docs/managing-turbo-replication
+For more information, see README.md.
+"""
+
+# [START storage_get_rpo]
+
+from google.cloud import storage
+
+
+def get_rpo(bucket_name):
+ """Gets the RPO of the bucket"""
+ # The ID of your GCS bucket
+ # bucket_name = "my-bucket"
+
+ storage_client = storage.Client()
+ bucket = storage_client.get_bucket(bucket_name)
+ rpo = bucket.rpo
+
+ print(f"RPO for {bucket.name} is {rpo}.")
+
+
+# [END storage_get_rpo]
+
+if __name__ == "__main__":
+ get_rpo(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_get_service_account.py b/storage/samples/snippets/storage_get_service_account.py
new file mode 100644
index 00000000000..5ac0e563835
--- /dev/null
+++ b/storage/samples/snippets/storage_get_service_account.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# [START storage_get_service_account]
+from google.cloud import storage
+
+
+def get_service_account():
+ """Get the service account email"""
+ storage_client = storage.Client()
+
+ email = storage_client.get_service_account_email()
+ print(
+ f"The GCS service account for project {storage_client.project} is: {email} "
+ )
+
+
+# [END storage_get_service_account]
+
+if __name__ == "__main__":
+ get_service_account()
diff --git a/storage/samples/snippets/storage_get_soft_delete_policy.py b/storage/samples/snippets/storage_get_soft_delete_policy.py
new file mode 100644
index 00000000000..99c4e572a24
--- /dev/null
+++ b/storage/samples/snippets/storage_get_soft_delete_policy.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_get_soft_delete_policy]
+from google.cloud import storage
+
+
+def get_soft_delete_policy(bucket_name):
+ """Gets the soft-delete policy of the bucket"""
+ # bucket_name = "your-bucket-name"
+
+ storage_client = storage.Client()
+ bucket = storage_client.get_bucket(bucket_name)
+
+ print(f"Soft-delete policy for {bucket_name}")
+ if (
+ bucket.soft_delete_policy
+ and bucket.soft_delete_policy.retention_duration_seconds
+ ):
+ print("Object soft-delete policy is enabled")
+ print(
+ f"Object retention duration: {bucket.soft_delete_policy.retention_duration_seconds} seconds"
+ )
+ print(f"Policy effective time: {bucket.soft_delete_policy.effective_time}")
+ else:
+ print("Object soft-delete policy is disabled")
+
+
+# [END storage_get_soft_delete_policy]
+
+if __name__ == "__main__":
+ get_soft_delete_policy(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_get_soft_deleted_bucket.py b/storage/samples/snippets/storage_get_soft_deleted_bucket.py
new file mode 100644
index 00000000000..2b795504657
--- /dev/null
+++ b/storage/samples/snippets/storage_get_soft_deleted_bucket.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import sys
+
+# [START storage_get_soft_deleted_bucket]
+
+from google.cloud import storage
+
+
+def get_soft_deleted_bucket(bucket_name, generation):
+ """Prints out a soft-deleted bucket's metadata.
+
+ Args:
+ bucket_name: str
+ The name of the bucket to get.
+
+ generation:
+ The generation of the bucket.
+
+ """
+ storage_client = storage.Client()
+ bucket = storage_client.get_bucket(bucket_name, soft_deleted=True, generation=generation)
+
+ print(f"ID: {bucket.id}")
+ print(f"Name: {bucket.name}")
+ print(f"Soft Delete time: {bucket.soft_delete_time}")
+ print(f"Hard Delete Time : {bucket.hard_delete_time}")
+
+
+# [END storage_get_soft_deleted_bucket]
+
+if __name__ == "__main__":
+ get_soft_deleted_bucket(bucket_name=sys.argv[1], generation=sys.argv[2])
diff --git a/storage/samples/snippets/storage_get_uniform_bucket_level_access.py b/storage/samples/snippets/storage_get_uniform_bucket_level_access.py
new file mode 100644
index 00000000000..206b9f1ff11
--- /dev/null
+++ b/storage/samples/snippets/storage_get_uniform_bucket_level_access.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_get_uniform_bucket_level_access]
+from google.cloud import storage
+
+
+def get_uniform_bucket_level_access(bucket_name):
+ """Get uniform bucket-level access for a bucket"""
+ # bucket_name = "my-bucket"
+
+ storage_client = storage.Client()
+ bucket = storage_client.get_bucket(bucket_name)
+ iam_configuration = bucket.iam_configuration
+
+ if iam_configuration.uniform_bucket_level_access_enabled:
+ print(
+ f"Uniform bucket-level access is enabled for {bucket.name}."
+ )
+ print(
+ "Bucket will be locked on {}.".format(
+ iam_configuration.uniform_bucket_level_locked_time
+ )
+ )
+ else:
+ print(
+ f"Uniform bucket-level access is disabled for {bucket.name}."
+ )
+
+
+# [END storage_get_uniform_bucket_level_access]
+
+if __name__ == "__main__":
+ get_uniform_bucket_level_access(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_list_bucket_notifications.py b/storage/samples/snippets/storage_list_bucket_notifications.py
new file mode 100644
index 00000000000..0d25138bc90
--- /dev/null
+++ b/storage/samples/snippets/storage_list_bucket_notifications.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+"""Sample that lists notification configurations for a bucket.
+This sample is used on this page:
+ https://cloud.google.com/storage/docs/reporting-changes
+For more information, see README.md.
+"""
+
+# [START storage_list_bucket_notifications]
+from google.cloud import storage
+
+
+def list_bucket_notifications(bucket_name):
+ """Lists notification configurations for a bucket."""
+ # The ID of your GCS bucket
+ # bucket_name = "your-bucket-name"
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+ notifications = bucket.list_notifications()
+
+ for notification in notifications:
+ print(f"Notification ID: {notification.notification_id}")
+
+# [END storage_list_bucket_notifications]
+
+
+if __name__ == "__main__":
+ list_bucket_notifications(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_list_buckets.py b/storage/samples/snippets/storage_list_buckets.py
new file mode 100644
index 00000000000..f5897e47a42
--- /dev/null
+++ b/storage/samples/snippets/storage_list_buckets.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START storage_list_buckets]
+from google.cloud import storage
+
+
+def list_buckets():
+ """Lists all buckets."""
+
+ storage_client = storage.Client()
+ buckets = storage_client.list_buckets()
+
+ for bucket in buckets:
+ print(bucket.name)
+
+
+# [END storage_list_buckets]
+
+
+if __name__ == "__main__":
+ list_buckets()
diff --git a/storage/samples/snippets/storage_list_buckets_partial_success.py b/storage/samples/snippets/storage_list_buckets_partial_success.py
new file mode 100644
index 00000000000..bea4c9ed35c
--- /dev/null
+++ b/storage/samples/snippets/storage_list_buckets_partial_success.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright 2025 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START storage_list_buckets_partial_success]
+from google.cloud import storage
+
+
+def list_buckets_with_partial_success():
+ """Lists buckets and includes unreachable buckets in the response."""
+
+ storage_client = storage.Client()
+
+ buckets_iterator = storage_client.list_buckets(return_partial_success=True)
+
+ for page in buckets_iterator.pages:
+ if page.unreachable:
+ print("Unreachable locations in this page:")
+ for location in page.unreachable:
+ print(location)
+
+ print("Reachable buckets in this page:")
+ for bucket in page:
+ print(bucket.name)
+
+
+# [END storage_list_buckets_partial_success]
+
+
+if __name__ == "__main__":
+ list_buckets_with_partial_success()
diff --git a/storage/samples/snippets/storage_list_file_archived_generations.py b/storage/samples/snippets/storage_list_file_archived_generations.py
new file mode 100644
index 00000000000..419cc3da408
--- /dev/null
+++ b/storage/samples/snippets/storage_list_file_archived_generations.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_list_file_archived_generations]
+from google.cloud import storage
+
+
+def list_file_archived_generations(bucket_name):
+ """Lists all the blobs in the bucket with generation."""
+ # bucket_name = "your-bucket-name"
+
+ storage_client = storage.Client()
+
+ blobs = storage_client.list_blobs(bucket_name, versions=True)
+
+ for blob in blobs:
+ print(f"{blob.name},{blob.generation}")
+
+
+# [END storage_list_file_archived_generations]
+
+
+if __name__ == "__main__":
+ list_file_archived_generations(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_list_files.py b/storage/samples/snippets/storage_list_files.py
new file mode 100644
index 00000000000..5e80c833afe
--- /dev/null
+++ b/storage/samples/snippets/storage_list_files.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_list_files]
+from google.cloud import storage
+
+
+def list_blobs(bucket_name):
+ """Lists all the blobs in the bucket."""
+ # bucket_name = "your-bucket-name"
+
+ storage_client = storage.Client()
+
+ # Note: Client.list_blobs requires at least package version 1.17.0.
+ blobs = storage_client.list_blobs(bucket_name)
+
+ # Note: The call returns a response only when the iterator is consumed.
+ for blob in blobs:
+ print(blob.name)
+
+
+# [END storage_list_files]
+
+
+if __name__ == "__main__":
+ list_blobs(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_list_files_with_prefix.py b/storage/samples/snippets/storage_list_files_with_prefix.py
new file mode 100644
index 00000000000..7f877d1d6bc
--- /dev/null
+++ b/storage/samples/snippets/storage_list_files_with_prefix.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_list_files_with_prefix]
+from google.cloud import storage
+
+
+def list_blobs_with_prefix(bucket_name, prefix, delimiter=None):
+ """Lists all the blobs in the bucket that begin with the prefix.
+
+ This can be used to list all blobs in a "folder", e.g. "public/".
+
+ The delimiter argument can be used to restrict the results to only the
+ "files" in the given "folder". Without the delimiter, the entire tree under
+ the prefix is returned. For example, given these blobs:
+
+ a/1.txt
+ a/b/2.txt
+
+    If you specify prefix='a/', without a delimiter, you'll get back:
+
+ a/1.txt
+ a/b/2.txt
+
+ However, if you specify prefix='a/' and delimiter='/', you'll get back
+ only the file directly under 'a/':
+
+ a/1.txt
+
+ As part of the response, you'll also get back a blobs.prefixes entity
+ that lists the "subfolders" under `a/`:
+
+ a/b/
+
+
+ Note: If you only want to list prefixes a/b/ and don't want to iterate over
+ blobs, you can do
+
+ ```
+ for page in blobs.pages:
+ print(page.prefixes)
+ ```
+ """
+
+ storage_client = storage.Client()
+
+ # Note: Client.list_blobs requires at least package version 1.17.0.
+ blobs = storage_client.list_blobs(
+ bucket_name, prefix=prefix, delimiter=delimiter
+ )
+
+ # Note: The call returns a response only when the iterator is consumed.
+ print("Blobs:")
+ for blob in blobs:
+ print(blob.name)
+
+ if delimiter:
+ print("Prefixes:")
+ for prefix in blobs.prefixes:
+ print(prefix)
+
+
+# [END storage_list_files_with_prefix]
+
+if __name__ == "__main__":
+ list_blobs_with_prefix(
+ bucket_name=sys.argv[1], prefix=sys.argv[2], delimiter=sys.argv[3]
+ )
diff --git a/storage/samples/snippets/storage_list_hmac_keys.py b/storage/samples/snippets/storage_list_hmac_keys.py
new file mode 100644
index 00000000000..a09616fa519
--- /dev/null
+++ b/storage/samples/snippets/storage_list_hmac_keys.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_list_hmac_keys]
+from google.cloud import storage
+
+
+def list_keys(project_id):
+ """
+ List all HMAC keys associated with the project.
+ """
+ # project_id = "Your Google Cloud project ID"
+
+ storage_client = storage.Client(project=project_id)
+ hmac_keys = storage_client.list_hmac_keys(project_id=project_id)
+ print("HMAC Keys:")
+ for hmac_key in hmac_keys:
+ print(
+ f"Service Account Email: {hmac_key.service_account_email}"
+ )
+ print(f"Access ID: {hmac_key.access_id}")
+ return hmac_keys
+
+
+# [END storage_list_hmac_keys]
+
+if __name__ == "__main__":
+ list_keys(project_id=sys.argv[1])
diff --git a/storage/samples/snippets/storage_list_soft_deleted_buckets.py b/storage/samples/snippets/storage_list_soft_deleted_buckets.py
new file mode 100644
index 00000000000..16abd90f02a
--- /dev/null
+++ b/storage/samples/snippets/storage_list_soft_deleted_buckets.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START storage_list_soft_deleted_buckets]
+
+from google.cloud import storage
+
+
+def list_soft_deleted_buckets():
+ """Lists all soft-deleted buckets."""
+
+ storage_client = storage.Client()
+ buckets = storage_client.list_buckets(soft_deleted=True)
+
+ for bucket in buckets:
+ print(bucket.name)
+
+
+# [END storage_list_soft_deleted_buckets]
+
+
+if __name__ == "__main__":
+ list_soft_deleted_buckets()
diff --git a/storage/samples/snippets/storage_list_soft_deleted_object_versions.py b/storage/samples/snippets/storage_list_soft_deleted_object_versions.py
new file mode 100644
index 00000000000..ecb9851c454
--- /dev/null
+++ b/storage/samples/snippets/storage_list_soft_deleted_object_versions.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_list_soft_deleted_object_versions]
+from google.cloud import storage
+
+
+def list_soft_deleted_object_versions(bucket_name, blob_name):
+ """Lists all versions of a soft-deleted object in the bucket."""
+ # bucket_name = "your-bucket-name"
+ # blob_name = "your-object-name"
+
+ storage_client = storage.Client()
+ blobs = storage_client.list_blobs(bucket_name, prefix=blob_name, soft_deleted=True)
+
+ # Note: The call returns a response only when the iterator is consumed.
+ for blob in blobs:
+ print(
+ f"Version ID: {blob.generation}, Soft Delete Time: {blob.soft_delete_time}"
+ )
+
+
+# [END storage_list_soft_deleted_object_versions]
+
+if __name__ == "__main__":
+ list_soft_deleted_object_versions(bucket_name=sys.argv[1], blob_name=sys.argv[2])
diff --git a/storage/samples/snippets/storage_list_soft_deleted_objects.py b/storage/samples/snippets/storage_list_soft_deleted_objects.py
new file mode 100644
index 00000000000..764cac56a6d
--- /dev/null
+++ b/storage/samples/snippets/storage_list_soft_deleted_objects.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_list_soft_deleted_objects]
+from google.cloud import storage
+
+
+def list_soft_deleted_objects(bucket_name):
+ """Lists all soft-deleted objects in the bucket."""
+ # bucket_name = "your-bucket-name"
+
+ storage_client = storage.Client()
+ blobs = storage_client.list_blobs(bucket_name, soft_deleted=True)
+
+ # Note: The call returns a response only when the iterator is consumed.
+ for blob in blobs:
+ print(
+ f"Name: {blob.name}, Generation: {blob.generation}, Soft Delete Time: {blob.soft_delete_time}"
+ )
+
+
+# [END storage_list_soft_deleted_objects]
+
+if __name__ == "__main__":
+ list_soft_deleted_objects(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_lock_retention_policy.py b/storage/samples/snippets/storage_lock_retention_policy.py
new file mode 100644
index 00000000000..adff364d749
--- /dev/null
+++ b/storage/samples/snippets/storage_lock_retention_policy.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_lock_retention_policy]
+from google.cloud import storage
+
+
+def lock_retention_policy(bucket_name):
+ """Locks the retention policy on a given bucket"""
+ # bucket_name = "my-bucket"
+
+ storage_client = storage.Client()
+ # get_bucket gets the current metageneration value for the bucket,
+ # required by lock_retention_policy.
+ bucket = storage_client.get_bucket(bucket_name)
+
+ # Warning: Once a retention policy is locked it cannot be unlocked
+ # and retention period can only be increased.
+ bucket.lock_retention_policy()
+
+ print(f"Retention policy for {bucket_name} is now locked")
+ print(
+ f"Retention policy effective as of {bucket.retention_policy_effective_time}"
+ )
+
+
+# [END storage_lock_retention_policy]
+
+
+if __name__ == "__main__":
+ lock_retention_policy(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_make_public.py b/storage/samples/snippets/storage_make_public.py
new file mode 100644
index 00000000000..489508cf674
--- /dev/null
+++ b/storage/samples/snippets/storage_make_public.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_make_public]
+from google.cloud import storage
+
+
+def make_blob_public(bucket_name, blob_name):
+ """Makes a blob publicly accessible."""
+ # bucket_name = "your-bucket-name"
+ # blob_name = "your-object-name"
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+ blob = bucket.blob(blob_name)
+
+ blob.make_public()
+
+ print(
+ f"Blob {blob.name} is publicly accessible at {blob.public_url}"
+ )
+
+
+# [END storage_make_public]
+
+if __name__ == "__main__":
+ make_blob_public(bucket_name=sys.argv[1], blob_name=sys.argv[2])
diff --git a/storage/samples/snippets/storage_move_file.py b/storage/samples/snippets/storage_move_file.py
new file mode 100644
index 00000000000..b2e5144d0b2
--- /dev/null
+++ b/storage/samples/snippets/storage_move_file.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_move_file]
+from google.cloud import storage
+
+
+def move_blob(bucket_name, blob_name, destination_bucket_name, destination_blob_name,):
+ """Moves a blob from one bucket to another with a new name."""
+ # The ID of your GCS bucket
+ # bucket_name = "your-bucket-name"
+ # The ID of your GCS object
+ # blob_name = "your-object-name"
+ # The ID of the bucket to move the object to
+ # destination_bucket_name = "destination-bucket-name"
+ # The ID of your new GCS object (optional)
+ # destination_blob_name = "destination-object-name"
+
+ storage_client = storage.Client()
+
+ source_bucket = storage_client.bucket(bucket_name)
+ source_blob = source_bucket.blob(blob_name)
+ destination_bucket = storage_client.bucket(destination_bucket_name)
+
+ # Optional: set a generation-match precondition to avoid potential race conditions
+ # and data corruptions. The request is aborted if the object's
+ # generation number does not match your precondition. For a destination
+ # object that does not yet exist, set the if_generation_match precondition to 0.
+ # If the destination object already exists in your bucket, set instead a
+ # generation-match precondition using its generation number.
+ # There is also an `if_source_generation_match` parameter, which is not used in this example.
+ destination_generation_match_precondition = 0
+
+ blob_copy = source_bucket.copy_blob(
+ source_blob, destination_bucket, destination_blob_name, if_generation_match=destination_generation_match_precondition,
+ )
+ source_bucket.delete_blob(blob_name)
+
+ print(
+ "Blob {} in bucket {} moved to blob {} in bucket {}.".format(
+ source_blob.name,
+ source_bucket.name,
+ blob_copy.name,
+ destination_bucket.name,
+ )
+ )
+
+
+# [END storage_move_file]
+
+if __name__ == "__main__":
+ move_blob(
+ bucket_name=sys.argv[1],
+ blob_name=sys.argv[2],
+ destination_bucket_name=sys.argv[3],
+ destination_blob_name=sys.argv[4],
+ )
diff --git a/storage/samples/snippets/storage_move_file_atomically.py b/storage/samples/snippets/storage_move_file_atomically.py
new file mode 100644
index 00000000000..d659cf3661a
--- /dev/null
+++ b/storage/samples/snippets/storage_move_file_atomically.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_move_object]
+from google.cloud import storage
+
+
+def move_object(bucket_name: str, blob_name: str, new_blob_name: str) -> None:
+    """Moves a blob to a new name within the same bucket using the move API.
+
+    The rename happens server-side via ``Bucket.move_blob``, so the object
+    data is not downloaded and re-uploaded by the client.
+
+    Args:
+        bucket_name: The name of the GCS bucket containing the object.
+        blob_name: The current name of the GCS object to move.
+        new_blob_name: The new name for the GCS object.
+    """
+    # The name of your GCS bucket
+    # bucket_name = "your-bucket-name"
+
+    # The name of your GCS object to move
+    # blob_name = "your-file-name"
+
+    # The new name of the GCS object
+    # new_blob_name = "new-file-name"
+
+    storage_client = storage.Client()
+
+    bucket = storage_client.bucket(bucket_name)
+    blob_to_move = bucket.blob(blob_name)
+
+    # Use move_blob to perform an efficient, server-side move.
+    moved_blob = bucket.move_blob(
+        blob=blob_to_move, new_name=new_blob_name
+    )
+
+    print(f"Blob {blob_to_move.name} has been moved to {moved_blob.name}.")
+
+
+# [END storage_move_object]
+
+# Command-line entry point: bucket, current object name, new object name.
+if __name__ == "__main__":
+    move_object(
+        bucket_name=sys.argv[1],
+        blob_name=sys.argv[2],
+        new_blob_name=sys.argv[3],
+    )
diff --git a/storage/samples/snippets/storage_object_csek_to_cmek.py b/storage/samples/snippets/storage_object_csek_to_cmek.py
new file mode 100644
index 00000000000..9a915f08d63
--- /dev/null
+++ b/storage/samples/snippets/storage_object_csek_to_cmek.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import sys
+
+# [START storage_object_csek_to_cmek]
+from google.cloud import storage
+
+
+def object_csek_to_cmek(bucket_name, blob_name, encryption_key, kms_key_name):
+    """Change a blob's customer-supplied encryption key to KMS key.
+
+    Args:
+        bucket_name: Name of the bucket containing the object.
+        blob_name: Name of the object to re-encrypt.
+        encryption_key: Base64-encoded customer-supplied encryption key the
+            object is currently encrypted with.
+        kms_key_name: Full resource name of the Cloud KMS key to use instead.
+
+    Returns:
+        The rewritten, KMS-encrypted destination blob.
+    """
+    # bucket_name = "your-bucket-name"
+    # blob_name = "your-object-name"
+    # encryption_key = "TIbv/fjexq+VmtXzAlc63J4z5kFmWJ6NdAPQulQBT7g="
+    # kms_key_name = "projects/PROJ/locations/LOC/keyRings/RING/cryptoKey/KEY"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+
+    # Decode the base64 CSEK so the source blob's data can be read.
+    current_encryption_key = base64.b64decode(encryption_key)
+    source_blob = bucket.blob(blob_name, encryption_key=current_encryption_key)
+    destination_blob = bucket.blob(blob_name, kms_key_name=kms_key_name)
+    generation_match_precondition = None
+    token = None
+
+    # Optional: set a generation-match precondition to avoid potential race conditions
+    # and data corruptions. The request to rewrite is aborted if the object's
+    # generation number does not match your precondition.
+    source_blob.reload()  # Fetch blob metadata to use in generation_match_precondition.
+    generation_match_precondition = source_blob.generation
+
+    # A rewrite may take several calls for large objects; a non-None token
+    # means the copy is incomplete and must be passed back in to continue it.
+    while True:
+        token, bytes_rewritten, total_bytes = destination_blob.rewrite(
+            source_blob, token=token, if_generation_match=generation_match_precondition
+        )
+        if token is None:
+            break
+
+    print(
+        "Blob {} in bucket {} is now managed by the KMS key {} instead of a customer-supplied encryption key".format(
+            blob_name, bucket_name, kms_key_name
+        )
+    )
+    return destination_blob
+
+
+# [END storage_object_csek_to_cmek]
+
+if __name__ == "__main__":
+    object_csek_to_cmek(
+        bucket_name=sys.argv[1],
+        blob_name=sys.argv[2],
+        encryption_key=sys.argv[3],
+        kms_key_name=sys.argv[4],
+    )
diff --git a/storage/samples/snippets/storage_object_get_kms_key.py b/storage/samples/snippets/storage_object_get_kms_key.py
new file mode 100644
index 00000000000..7604e6eba6e
--- /dev/null
+++ b/storage/samples/snippets/storage_object_get_kms_key.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_object_get_kms_key]
+from google.cloud import storage
+
+
+def object_get_kms_key(bucket_name, blob_name):
+    """Retrieve the KMS key of a blob.
+
+    Args:
+        bucket_name: Name of the bucket containing the object.
+        blob_name: Name of the object whose KMS key name is retrieved.
+
+    Returns:
+        The resource name of the KMS key associated with the object.
+    """
+    # bucket_name = "your-bucket-name"
+    # blob_name = "your-object-name"
+
+    storage_client = storage.Client()
+
+    bucket = storage_client.bucket(bucket_name)
+    # get_blob fetches the object's metadata from Cloud Storage.
+    # NOTE(review): get_blob returns None if the object does not exist, which
+    # would make the attribute access below raise — confirm intended handling.
+    blob = bucket.get_blob(blob_name)
+
+    kms_key = blob.kms_key_name
+
+    print(f"The KMS key of a blob is {blob.kms_key_name}")
+    return kms_key
+
+
+# [END storage_object_get_kms_key]
+
+if __name__ == "__main__":
+    object_get_kms_key(bucket_name=sys.argv[1], blob_name=sys.argv[2])
diff --git a/storage/samples/snippets/storage_print_bucket_acl.py b/storage/samples/snippets/storage_print_bucket_acl.py
new file mode 100644
index 00000000000..55417f1bc77
--- /dev/null
+++ b/storage/samples/snippets/storage_print_bucket_acl.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_print_bucket_acl]
+from google.cloud import storage
+
+
+def print_bucket_acl(bucket_name):
+    """Prints out a bucket's access control list.
+
+    Args:
+        bucket_name: Name of the bucket whose ACL entries are printed.
+    """
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+
+    # Each ACL entry is a mapping with "role" and "entity" keys.
+    for entry in bucket.acl:
+        print(f"{entry['role']}: {entry['entity']}")
+
+
+# [END storage_print_bucket_acl]
+
+if __name__ == "__main__":
+    print_bucket_acl(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_print_bucket_acl_for_user.py b/storage/samples/snippets/storage_print_bucket_acl_for_user.py
new file mode 100644
index 00000000000..fa786d03af9
--- /dev/null
+++ b/storage/samples/snippets/storage_print_bucket_acl_for_user.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_print_bucket_acl_for_user]
+from google.cloud import storage
+
+
+def print_bucket_acl_for_user(bucket_name, user_email):
+    """Prints out a bucket's access control list for a given user.
+
+    Args:
+        bucket_name: Name of the bucket whose ACL is inspected.
+        user_email: Email address of the user whose roles are printed.
+    """
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+
+    # Reload fetches the current ACL from Cloud Storage.
+    bucket.acl.reload()
+
+    # You can also use `group`, `domain`, `all_authenticated` and `all` to
+    # get the roles for different types of entities.
+    roles = bucket.acl.user(user_email).get_roles()
+
+    print(roles)
+
+
+# [END storage_print_bucket_acl_for_user]
+
+if __name__ == "__main__":
+    print_bucket_acl_for_user(bucket_name=sys.argv[1], user_email=sys.argv[2])
diff --git a/storage/samples/snippets/storage_print_file_acl.py b/storage/samples/snippets/storage_print_file_acl.py
new file mode 100644
index 00000000000..8dfc4e98464
--- /dev/null
+++ b/storage/samples/snippets/storage_print_file_acl.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_print_file_acl]
+from google.cloud import storage
+
+
+def print_blob_acl(bucket_name, blob_name):
+    """Prints out a blob's access control list.
+
+    Args:
+        bucket_name: Name of the bucket containing the object.
+        blob_name: Name of the object whose ACL entries are printed.
+    """
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+    blob = bucket.blob(blob_name)
+
+    # Each ACL entry is a mapping with "role" and "entity" keys.
+    for entry in blob.acl:
+        print(f"{entry['role']}: {entry['entity']}")
+
+
+# [END storage_print_file_acl]
+
+if __name__ == "__main__":
+    print_blob_acl(bucket_name=sys.argv[1], blob_name=sys.argv[2])
diff --git a/storage/samples/snippets/storage_print_file_acl_for_user.py b/storage/samples/snippets/storage_print_file_acl_for_user.py
new file mode 100644
index 00000000000..e399b916013
--- /dev/null
+++ b/storage/samples/snippets/storage_print_file_acl_for_user.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_print_file_acl_for_user]
+from google.cloud import storage
+
+
+def print_blob_acl_for_user(bucket_name, blob_name, user_email):
+    """Prints out a blob's access control list for a given user.
+
+    Args:
+        bucket_name: Name of the bucket containing the object.
+        blob_name: Name of the object whose ACL is inspected.
+        user_email: Email address of the user whose roles are printed.
+    """
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+    blob = bucket.blob(blob_name)
+
+    # Reload fetches the current ACL from Cloud Storage.
+    blob.acl.reload()
+
+    # You can also use `group`, `domain`, `all_authenticated` and `all` to
+    # get the roles for different types of entities.
+    roles = blob.acl.user(user_email).get_roles()
+
+    print(roles)
+
+
+# [END storage_print_file_acl_for_user]
+
+if __name__ == "__main__":
+    print_blob_acl_for_user(
+        bucket_name=sys.argv[1], blob_name=sys.argv[2], user_email=sys.argv[3],
+    )
diff --git a/storage/samples/snippets/storage_print_pubsub_bucket_notification.py b/storage/samples/snippets/storage_print_pubsub_bucket_notification.py
new file mode 100644
index 00000000000..3df45dc1f57
--- /dev/null
+++ b/storage/samples/snippets/storage_print_pubsub_bucket_notification.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+"""Sample that gets a notification configuration for a bucket.
+This sample is used on this page:
+ https://cloud.google.com/storage/docs/reporting-changes
+For more information, see README.md.
+"""
+
+# [START storage_print_pubsub_bucket_notification]
+from google.cloud import storage
+
+
+def print_pubsub_bucket_notification(bucket_name, notification_id):
+    """Gets a notification configuration for a bucket and prints its fields.
+
+    Args:
+        bucket_name: The ID of the GCS bucket.
+        notification_id: The ID of the notification configuration to print.
+    """
+    # The ID of your GCS bucket
+    # bucket_name = "your-bucket-name"
+    # The ID of the notification
+    # notification_id = "your-notification-id"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+    notification = bucket.get_notification(notification_id)
+
+    print(f"Notification ID: {notification.notification_id}")
+    print(f"Topic Name: {notification.topic_name}")
+    print(f"Event Types: {notification.event_types}")
+    print(f"Custom Attributes: {notification.custom_attributes}")
+    print(f"Payload Format: {notification.payload_format}")
+    print(f"Blob Name Prefix: {notification.blob_name_prefix}")
+    print(f"Etag: {notification.etag}")
+    print(f"Self Link: {notification.self_link}")
+
+# [END storage_print_pubsub_bucket_notification]
+
+
+if __name__ == "__main__":
+    print_pubsub_bucket_notification(bucket_name=sys.argv[1], notification_id=sys.argv[2])
diff --git a/storage/samples/snippets/storage_release_event_based_hold.py b/storage/samples/snippets/storage_release_event_based_hold.py
new file mode 100644
index 00000000000..6b4a2ccb51c
--- /dev/null
+++ b/storage/samples/snippets/storage_release_event_based_hold.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_release_event_based_hold]
+from google.cloud import storage
+
+
+def release_event_based_hold(bucket_name, blob_name):
+    """Releases the event based hold on a given blob.
+
+    Args:
+        bucket_name: Name of the bucket containing the object.
+        blob_name: Name of the object whose event-based hold is released.
+    """
+
+    # bucket_name = "my-bucket"
+    # blob_name = "my-blob"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+    blob = bucket.blob(blob_name)
+    metageneration_match_precondition = None
+
+    # Optional: set a metageneration-match precondition to avoid potential race
+    # conditions and data corruptions. The request to patch is aborted if the
+    # object's metageneration does not match your precondition.
+    blob.reload()  # Fetch blob metadata to use in metageneration_match_precondition.
+    metageneration_match_precondition = blob.metageneration
+
+    blob.event_based_hold = False
+    blob.patch(if_metageneration_match=metageneration_match_precondition)
+
+    print(f"Event based hold was released for {blob_name}")
+
+
+# [END storage_release_event_based_hold]
+
+
+if __name__ == "__main__":
+    release_event_based_hold(bucket_name=sys.argv[1], blob_name=sys.argv[2])
diff --git a/storage/samples/snippets/storage_release_temporary_hold.py b/storage/samples/snippets/storage_release_temporary_hold.py
new file mode 100644
index 00000000000..64c7607c182
--- /dev/null
+++ b/storage/samples/snippets/storage_release_temporary_hold.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_release_temporary_hold]
+from google.cloud import storage
+
+
+def release_temporary_hold(bucket_name, blob_name):
+ """Releases the temporary hold on a given blob"""
+
+ # bucket_name = "my-bucket"
+ # blob_name = "my-blob"
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+ blob = bucket.blob(blob_name)
+ metageneration_match_precondition = None
+
+ # Optional: set a metageneration-match precondition to avoid potential race
+ # conditions and data corruptions. The request to patch is aborted if the
+ # object's metageneration does not match your precondition.
+ blob.reload() # Fetch blob metadata to use in metageneration_match_precondition.
+ metageneration_match_precondition = blob.metageneration
+
+ blob.temporary_hold = False
+ blob.patch(if_metageneration_match=metageneration_match_precondition)
+
+ print("Temporary hold was release for #{blob_name}")
+
+
+# [END storage_release_temporary_hold]
+
+
+if __name__ == "__main__":
+ release_temporary_hold(bucket_name=sys.argv[1], blob_name=sys.argv[2])
diff --git a/storage/samples/snippets/storage_remove_bucket_conditional_iam_binding.py b/storage/samples/snippets/storage_remove_bucket_conditional_iam_binding.py
new file mode 100644
index 00000000000..242544d8ed2
--- /dev/null
+++ b/storage/samples/snippets/storage_remove_bucket_conditional_iam_binding.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_remove_bucket_conditional_iam_binding]
+from google.cloud import storage
+
+
+def remove_bucket_conditional_iam_binding(
+    bucket_name, role, title, description, expression
+):
+    """Remove a conditional IAM binding from a bucket's IAM policy.
+
+    Args:
+        bucket_name: Name of the bucket whose policy is modified.
+        role: IAM role of the binding, e.g. roles/storage.objectViewer.
+        title: Title of the binding's condition.
+        description: Description of the binding's condition.
+        expression: Expression of the binding's condition.
+    """
+    # bucket_name = "your-bucket-name"
+    # role = "IAM role, e.g. roles/storage.objectViewer"
+    # title = "Condition title."
+    # description = "Condition description."
+    # expression = "Condition expression."
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+
+    policy = bucket.get_iam_policy(requested_policy_version=3)
+
+    # Set the policy's version to 3 to use condition in bindings.
+    policy.version = 3
+
+    condition = {
+        "title": title,
+        "description": description,
+        "expression": expression,
+    }
+    # Keep every binding except the one whose role and condition both match.
+    policy.bindings = [
+        binding
+        for binding in policy.bindings
+        if not (binding["role"] == role and binding.get("condition") == condition)
+    ]
+
+    bucket.set_iam_policy(policy)
+
+    print("Conditional Binding was removed.")
+
+
+# [END storage_remove_bucket_conditional_iam_binding]
+
+
+if __name__ == "__main__":
+    remove_bucket_conditional_iam_binding(
+        bucket_name=sys.argv[1],
+        role=sys.argv[2],
+        title=sys.argv[3],
+        description=sys.argv[4],
+        expression=sys.argv[5],
+    )
diff --git a/storage/samples/snippets/storage_remove_bucket_default_owner.py b/storage/samples/snippets/storage_remove_bucket_default_owner.py
new file mode 100644
index 00000000000..e6f3c495e5f
--- /dev/null
+++ b/storage/samples/snippets/storage_remove_bucket_default_owner.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_remove_bucket_default_owner]
+from google.cloud import storage
+
+
+def remove_bucket_default_owner(bucket_name, user_email):
+ """Removes a user from the access control list of the given bucket's
+ default object access control list."""
+ # bucket_name = "your-bucket-name"
+ # user_email = "name@example.com"
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+
+ # Reload fetches the current ACL from Cloud Storage.
+ bucket.acl.reload()
+
+ # You can also use `group`, `domain`, `all_authenticated` and `all` to
+ # remove access for different types of entities.
+ bucket.default_object_acl.user(user_email).revoke_read()
+ bucket.default_object_acl.user(user_email).revoke_write()
+ bucket.default_object_acl.user(user_email).revoke_owner()
+ bucket.default_object_acl.save()
+
+ print(
+ f"Removed user {user_email} from the default acl of bucket {bucket_name}."
+ )
+
+
+# [END storage_remove_bucket_default_owner]
+
+if __name__ == "__main__":
+ remove_bucket_default_owner(
+ bucket_name=sys.argv[1], user_email=sys.argv[2]
+ )
diff --git a/storage/samples/snippets/storage_remove_bucket_iam_member.py b/storage/samples/snippets/storage_remove_bucket_iam_member.py
new file mode 100644
index 00000000000..2efc29e303c
--- /dev/null
+++ b/storage/samples/snippets/storage_remove_bucket_iam_member.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_remove_bucket_iam_member]
+from google.cloud import storage
+
+
+def remove_bucket_iam_member(bucket_name, role, member):
+ """Remove member from bucket IAM Policy"""
+ # bucket_name = "your-bucket-name"
+ # role = "IAM role, e.g. roles/storage.objectViewer"
+ # member = "IAM identity, e.g. user: name@example.com"
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+
+ policy = bucket.get_iam_policy(requested_policy_version=3)
+
+ for binding in policy.bindings:
+ print(binding)
+ if binding["role"] == role and binding.get("condition") is None:
+ binding["members"].discard(member)
+
+ bucket.set_iam_policy(policy)
+
+ print(f"Removed {member} with role {role} from {bucket_name}.")
+
+
+# [END storage_remove_bucket_iam_member]
+
+if __name__ == "__main__":
+ remove_bucket_iam_member(
+ bucket_name=sys.argv[1], role=sys.argv[2], member=sys.argv[3]
+ )
diff --git a/storage/samples/snippets/storage_remove_bucket_label.py b/storage/samples/snippets/storage_remove_bucket_label.py
new file mode 100644
index 00000000000..fc4a5b4e7b2
--- /dev/null
+++ b/storage/samples/snippets/storage_remove_bucket_label.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# [START storage_remove_bucket_label]
+import pprint
+# [END storage_remove_bucket_label]
+import sys
+# [START storage_remove_bucket_label]
+
+from google.cloud import storage
+
+
+def remove_bucket_label(bucket_name):
+    """Remove a label from a bucket.
+
+    This sample removes the label with key "example", if present.
+
+    Args:
+        bucket_name: Name of the bucket whose labels are modified.
+    """
+    # bucket_name = "your-bucket-name"
+
+    storage_client = storage.Client()
+    bucket = storage_client.get_bucket(bucket_name)
+
+    labels = bucket.labels
+
+    if "example" in labels:
+        del labels["example"]
+
+    # Assign the modified mapping back and patch to persist the change.
+    bucket.labels = labels
+    bucket.patch()
+
+    print(f"Removed labels on {bucket.name}.")
+    pprint.pprint(bucket.labels)
+
+
+# [END storage_remove_bucket_label]
+
+if __name__ == "__main__":
+    remove_bucket_label(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_remove_bucket_owner.py b/storage/samples/snippets/storage_remove_bucket_owner.py
new file mode 100644
index 00000000000..561ba9175a6
--- /dev/null
+++ b/storage/samples/snippets/storage_remove_bucket_owner.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_remove_bucket_owner]
+from google.cloud import storage
+
+
+def remove_bucket_owner(bucket_name, user_email):
+    """Removes a user from the access control list of the given bucket.
+
+    Args:
+        bucket_name: Name of the bucket whose ACL is modified.
+        user_email: Email address of the user to remove.
+    """
+    # bucket_name = "your-bucket-name"
+    # user_email = "name@example.com"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+
+    # Reload fetches the current ACL from Cloud Storage.
+    bucket.acl.reload()
+
+    # You can also use `group`, `domain`, `all_authenticated` and `all` to
+    # remove access for different types of entities.
+    bucket.acl.user(user_email).revoke_read()
+    bucket.acl.user(user_email).revoke_write()
+    bucket.acl.user(user_email).revoke_owner()
+    bucket.acl.save()
+
+    print(f"Removed user {user_email} from bucket {bucket_name}.")
+
+
+# [END storage_remove_bucket_owner]
+
+if __name__ == "__main__":
+    remove_bucket_owner(bucket_name=sys.argv[1], user_email=sys.argv[2])
diff --git a/storage/samples/snippets/storage_remove_cors_configuration.py b/storage/samples/snippets/storage_remove_cors_configuration.py
new file mode 100644
index 00000000000..ad97371f494
--- /dev/null
+++ b/storage/samples/snippets/storage_remove_cors_configuration.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_remove_cors_configuration]
+from google.cloud import storage
+
+
+def remove_cors_configuration(bucket_name):
+ """Remove a bucket's CORS policies configuration."""
+ # bucket_name = "your-bucket-name"
+
+ storage_client = storage.Client()
+ bucket = storage_client.get_bucket(bucket_name)
+ bucket.cors = []
+ bucket.patch()
+
+ print(f"Remove CORS policies for bucket {bucket.name}.")
+ return bucket
+
+
+# [END storage_remove_cors_configuration]
+
+if __name__ == "__main__":
+ remove_cors_configuration(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_remove_file_owner.py b/storage/samples/snippets/storage_remove_file_owner.py
new file mode 100644
index 00000000000..315a747adbc
--- /dev/null
+++ b/storage/samples/snippets/storage_remove_file_owner.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_remove_file_owner]
+from google.cloud import storage
+
+
+def remove_blob_owner(bucket_name, blob_name, user_email):
+    """Removes a user from the access control list of the given blob in the
+    given bucket.
+
+    Args:
+        bucket_name: Name of the bucket containing the object.
+        blob_name: Name of the object whose ACL is modified.
+        user_email: Email address of the user to remove.
+    """
+    # bucket_name = "your-bucket-name"
+    # blob_name = "your-object-name"
+    # user_email = "name@example.com"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+    blob = bucket.blob(blob_name)
+
+    # You can also use `group`, `domain`, `all_authenticated` and `all` to
+    # remove access for different types of entities.
+    # NOTE(review): unlike the bucket-ACL sample, the ACL is not reloaded
+    # before revoking here — confirm `acl.save()` fetches it when needed.
+    blob.acl.user(user_email).revoke_read()
+    blob.acl.user(user_email).revoke_write()
+    blob.acl.user(user_email).revoke_owner()
+    blob.acl.save()
+
+    print(
+        f"Removed user {user_email} from blob {blob_name} in bucket {bucket_name}."
+    )
+
+
+# [END storage_remove_file_owner]
+
+if __name__ == "__main__":
+    remove_blob_owner(
+        bucket_name=sys.argv[1], blob_name=sys.argv[2], user_email=sys.argv[3],
+    )
diff --git a/storage/samples/snippets/storage_remove_retention_policy.py b/storage/samples/snippets/storage_remove_retention_policy.py
new file mode 100644
index 00000000000..9ede8053afd
--- /dev/null
+++ b/storage/samples/snippets/storage_remove_retention_policy.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_remove_retention_policy]
+from google.cloud import storage
+
+
+def remove_retention_policy(bucket_name):
+ """Removes the retention policy on a given bucket"""
+ # bucket_name = "my-bucket"
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+ bucket.reload()
+
+ if bucket.retention_policy_locked:
+ print(
+ "Unable to remove retention period as retention policy is locked."
+ )
+ return
+
+ bucket.retention_period = None
+ bucket.patch()
+
+ print(f"Removed bucket {bucket.name} retention policy")
+
+
+# [END storage_remove_retention_policy]
+
+
+if __name__ == "__main__":
+ remove_retention_policy(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_rename_file.py b/storage/samples/snippets/storage_rename_file.py
new file mode 100644
index 00000000000..1125007c655
--- /dev/null
+++ b/storage/samples/snippets/storage_rename_file.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_rename_file]
+from google.cloud import storage
+
+
+def rename_blob(bucket_name, blob_name, new_name):
+ """Renames a blob."""
+ # The ID of your GCS bucket
+ # bucket_name = "your-bucket-name"
+ # The ID of the GCS object to rename
+ # blob_name = "your-object-name"
+ # The new ID of the GCS object
+ # new_name = "new-object-name"
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+ blob = bucket.blob(blob_name)
+
+ new_blob = bucket.rename_blob(blob, new_name)
+
+ print(f"Blob {blob.name} has been renamed to {new_blob.name}")
+
+
+# [END storage_rename_file]
+
+if __name__ == "__main__":
+ rename_blob(bucket_name=sys.argv[1], blob_name=sys.argv[2], new_name=sys.argv[3])
diff --git a/storage/samples/snippets/storage_restore_object.py b/storage/samples/snippets/storage_restore_object.py
new file mode 100644
index 00000000000..d1e3f29372c
--- /dev/null
+++ b/storage/samples/snippets/storage_restore_object.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import sys
+
+# [START storage_restore_object]
+from google.cloud import storage
+
+
+def restore_soft_deleted_object(bucket_name, blob_name, blob_generation):
+    """Restores a soft-deleted object in the bucket.
+
+    Args:
+        bucket_name: Name of the bucket containing the soft-deleted object.
+        blob_name: Name of the object to restore.
+        blob_generation: Generation identifying which soft-deleted version
+            to restore.
+    """
+    # bucket_name = "your-bucket-name"
+    # blob_name = "your-object-name"
+    # blob_generation = "your-object-version-id"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+
+    # Restore function will override if a live object already
+    # exists with the same name.
+    # NOTE(review): argv supplies the generation as a string — presumably
+    # accepted by the API; confirm whether an int is required.
+    bucket.restore_blob(blob_name, generation=blob_generation)
+
+    print(
+        f"Soft-deleted object {blob_name} is restored in the bucket {bucket_name}"
+    )
+
+
+# [END storage_restore_object]
+
+if __name__ == "__main__":
+    restore_soft_deleted_object(
+        bucket_name=sys.argv[1], blob_name=sys.argv[2], blob_generation=sys.argv[3]
+    )
diff --git a/storage/samples/snippets/storage_restore_soft_deleted_bucket.py b/storage/samples/snippets/storage_restore_soft_deleted_bucket.py
new file mode 100644
index 00000000000..fb62919978e
--- /dev/null
+++ b/storage/samples/snippets/storage_restore_soft_deleted_bucket.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import sys
+
+# [START storage_restore_soft_deleted_bucket]
+
+from google.cloud import storage
+
+
+def restore_bucket(bucket_name, bucket_generation):
+ storage_client = storage.Client()
+ bucket = storage_client.restore_bucket(bucket_name=bucket_name, generation=bucket_generation)
+ print(f"Soft-deleted bucket {bucket.name} with ID: {bucket.id} was restored.")
+ print(f"Bucket Generation: {bucket.generation}")
+
+
+# [END storage_restore_soft_deleted_bucket]
+
+if __name__ == "__main__":
+ if len(sys.argv) != 3:
+ print("Wrong inputs!! Usage of script - \"python storage_restore_soft_deleted_bucket.py \" ")
+ sys.exit(1)
+ restore_bucket(bucket_name=sys.argv[1], bucket_generation=sys.argv[2])
diff --git a/storage/samples/snippets/storage_rotate_encryption_key.py b/storage/samples/snippets/storage_rotate_encryption_key.py
new file mode 100644
index 00000000000..174947b843e
--- /dev/null
+++ b/storage/samples/snippets/storage_rotate_encryption_key.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# [START storage_rotate_encryption_key]
+import base64
+# [END storage_rotate_encryption_key]
+import sys
+# [START storage_rotate_encryption_key]
+
+from google.cloud import storage
+
+
+def rotate_encryption_key(
+ bucket_name, blob_name, base64_encryption_key, base64_new_encryption_key
+):
+ """Performs a key rotation by re-writing an encrypted blob with a new
+ encryption key."""
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+ current_encryption_key = base64.b64decode(base64_encryption_key)
+ new_encryption_key = base64.b64decode(base64_new_encryption_key)
+
+ # Both source_blob and destination_blob refer to the same storage object,
+ # but destination_blob has the new encryption key.
+ source_blob = bucket.blob(
+ blob_name, encryption_key=current_encryption_key
+ )
+ destination_blob = bucket.blob(
+ blob_name, encryption_key=new_encryption_key
+ )
+ generation_match_precondition = None
+ token = None
+
+ # Optional: set a generation-match precondition to avoid potential race conditions
+ # and data corruptions. The request to rewrite is aborted if the object's
+ # generation number does not match your precondition.
+ source_blob.reload() # Fetch blob metadata to use in generation_match_precondition.
+ generation_match_precondition = source_blob.generation
+
+ while True:
+ token, bytes_rewritten, total_bytes = destination_blob.rewrite(
+ source_blob, token=token, if_generation_match=generation_match_precondition
+ )
+ if token is None:
+ break
+
+ print(f"Key rotation complete for Blob {blob_name}")
+
+
+# [END storage_rotate_encryption_key]
+
+if __name__ == "__main__":
+ rotate_encryption_key(
+ bucket_name=sys.argv[1],
+ blob_name=sys.argv[2],
+ base64_encryption_key=sys.argv[3],
+ base64_new_encryption_key=sys.argv[4],
+ )
diff --git a/storage/samples/snippets/storage_set_autoclass.py b/storage/samples/snippets/storage_set_autoclass.py
new file mode 100644
index 00000000000..eec5a550f8c
--- /dev/null
+++ b/storage/samples/snippets/storage_set_autoclass.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_set_autoclass]
+from google.cloud import storage
+
+
+def set_autoclass(bucket_name):
+    """Configure the Autoclass setting for a bucket.
+
+    terminal_storage_class field is optional and defaults to NEARLINE if not otherwise specified.
+    Valid terminal_storage_class values are NEARLINE and ARCHIVE.
+
+    Returns:
+        The patched bucket object, with Autoclass fields refreshed.
+    """
+    # The ID of your GCS bucket
+    # bucket_name = "my-bucket"
+    # Enable Autoclass for a bucket. Set enabled to false to disable Autoclass.
+    # Set Autoclass.TerminalStorageClass, valid values are NEARLINE and ARCHIVE.
+    enabled = True
+    terminal_storage_class = "ARCHIVE"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+
+    # Assign both Autoclass fields, then persist them in one patch() call.
+    bucket.autoclass_enabled = enabled
+    bucket.autoclass_terminal_storage_class = terminal_storage_class
+    bucket.patch()
+    print(f"Autoclass enabled is set to {bucket.autoclass_enabled} for {bucket.name} at {bucket.autoclass_toggle_time}.")
+    print(f"Autoclass terminal storage class is {bucket.autoclass_terminal_storage_class}.")
+
+    return bucket
+
+
+# [END storage_set_autoclass]
+
+if __name__ == "__main__":
+    set_autoclass(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_set_bucket_default_kms_key.py b/storage/samples/snippets/storage_set_bucket_default_kms_key.py
new file mode 100644
index 00000000000..7ba4718b2be
--- /dev/null
+++ b/storage/samples/snippets/storage_set_bucket_default_kms_key.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_set_bucket_default_kms_key]
+from google.cloud import storage
+
+
+def enable_default_kms_key(bucket_name, kms_key_name):
+ """Sets a bucket's default KMS key."""
+ # bucket_name = "your-bucket-name"
+ # kms_key_name = "projects/PROJ/locations/LOC/keyRings/RING/cryptoKey/KEY"
+
+ storage_client = storage.Client()
+ bucket = storage_client.get_bucket(bucket_name)
+ bucket.default_kms_key_name = kms_key_name
+ bucket.patch()
+
+ print(
+ "Set default KMS key for bucket {} to {}.".format(
+ bucket.name, bucket.default_kms_key_name
+ )
+ )
+
+
+# [END storage_set_bucket_default_kms_key]
+
+if __name__ == "__main__":
+ enable_default_kms_key(bucket_name=sys.argv[1], kms_key_name=sys.argv[2])
diff --git a/storage/samples/snippets/storage_set_bucket_encryption_enforcement_config.py b/storage/samples/snippets/storage_set_bucket_encryption_enforcement_config.py
new file mode 100644
index 00000000000..107564e7f6c
--- /dev/null
+++ b/storage/samples/snippets/storage_set_bucket_encryption_enforcement_config.py
@@ -0,0 +1,55 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START storage_set_bucket_encryption_enforcement_config]
+from google.cloud import storage
+from google.cloud.storage.bucket import EncryptionEnforcementConfig
+
+
+def set_bucket_encryption_enforcement_config(bucket_name):
+    """Creates a bucket with encryption enforcement configuration.
+
+    Args:
+        bucket_name: Name for the new bucket.
+
+    NOTE(review): this sample creates the bucket via bucket.create(); it
+    presumably fails if a bucket with this name already exists — confirm.
+    """
+    # The ID of your GCS bucket
+    # bucket_name = "your-unique-bucket-name"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+
+    # Setting restriction_mode to "FullyRestricted" for Google-managed encryption (GMEK)
+    # means objects cannot be created using the default Google-managed keys.
+    bucket.encryption.google_managed_encryption_enforcement_config = (
+        EncryptionEnforcementConfig(restriction_mode="FullyRestricted")
+    )
+
+    # Setting restriction_mode to "NotRestricted" for Customer-managed encryption (CMEK)
+    # ensures that objects ARE permitted to be created using Cloud KMS keys.
+    bucket.encryption.customer_managed_encryption_enforcement_config = (
+        EncryptionEnforcementConfig(restriction_mode="NotRestricted")
+    )
+
+    # Setting restriction_mode to "FullyRestricted" for Customer-supplied encryption (CSEK)
+    # prevents objects from being created using raw, client-side provided keys.
+    bucket.encryption.customer_supplied_encryption_enforcement_config = (
+        EncryptionEnforcementConfig(restriction_mode="FullyRestricted")
+    )
+
+    # All three enforcement configs are applied at creation time.
+    bucket.create()
+
+    print(f"Created bucket {bucket.name} with Encryption Enforcement Config.")
+
+
+# [END storage_set_bucket_encryption_enforcement_config]
+
+
+if __name__ == "__main__":
+    set_bucket_encryption_enforcement_config(bucket_name="your-unique-bucket-name")
diff --git a/storage/samples/snippets/storage_set_bucket_public_iam.py b/storage/samples/snippets/storage_set_bucket_public_iam.py
new file mode 100644
index 00000000000..0fb33f59c65
--- /dev/null
+++ b/storage/samples/snippets/storage_set_bucket_public_iam.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_set_bucket_public_iam]
+from typing import List
+
+from google.cloud import storage
+
+
+def set_bucket_public_iam(
+ bucket_name: str = "your-bucket-name",
+ members: List[str] = ["allUsers"],
+):
+ """Set a public IAM Policy to bucket"""
+ # bucket_name = "your-bucket-name"
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+
+ policy = bucket.get_iam_policy(requested_policy_version=3)
+ policy.bindings.append(
+ {"role": "roles/storage.objectViewer", "members": members}
+ )
+
+ bucket.set_iam_policy(policy)
+
+ print(f"Bucket {bucket.name} is now publicly readable")
+
+
+# [END storage_set_bucket_public_iam]
+
+if __name__ == "__main__":
+ set_bucket_public_iam(
+ bucket_name=sys.argv[1],
+ )
diff --git a/storage/samples/snippets/storage_set_client_endpoint.py b/storage/samples/snippets/storage_set_client_endpoint.py
new file mode 100644
index 00000000000..99ca283a18b
--- /dev/null
+++ b/storage/samples/snippets/storage_set_client_endpoint.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+"""Sample that initiates a storage client with a specified API endpoint.
+"""
+
+# [START storage_set_client_endpoint]
+
+from google.cloud import storage
+
+
+def set_client_endpoint(api_endpoint):
+ """Initiates client with specified endpoint."""
+ # api_endpoint = 'https://storage.googleapis.com'
+
+ storage_client = storage.Client(client_options={'api_endpoint': api_endpoint})
+
+ print(f"client initiated with endpoint: {storage_client._connection.API_BASE_URL}")
+
+ return storage_client
+
+
+# [END storage_set_client_endpoint]
+
+if __name__ == "__main__":
+ set_client_endpoint(api_endpoint=sys.argv[1])
diff --git a/storage/samples/snippets/storage_set_event_based_hold.py b/storage/samples/snippets/storage_set_event_based_hold.py
new file mode 100644
index 00000000000..76f7fd7eee4
--- /dev/null
+++ b/storage/samples/snippets/storage_set_event_based_hold.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_set_event_based_hold]
+from google.cloud import storage
+
+
+def set_event_based_hold(bucket_name, blob_name):
+    """Sets an event-based hold on a given blob.
+
+    Args:
+        bucket_name: Name of the bucket containing the object.
+        blob_name: Name of the object to place the hold on.
+    """
+    # bucket_name = "my-bucket"
+    # blob_name = "my-blob"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+    blob = bucket.blob(blob_name)
+    metageneration_match_precondition = None
+
+    # Optional: set a metageneration-match precondition to avoid potential race
+    # conditions and data corruptions. The request to patch is aborted if the
+    # object's metageneration does not match your precondition.
+    blob.reload()  # Fetch blob metadata to use in metageneration_match_precondition.
+    metageneration_match_precondition = blob.metageneration
+
+    blob.event_based_hold = True
+    blob.patch(if_metageneration_match=metageneration_match_precondition)
+
+    print(f"Event based hold was set for {blob_name}")
+
+
+# [END storage_set_event_based_hold]
+
+
+if __name__ == "__main__":
+    set_event_based_hold(bucket_name=sys.argv[1], blob_name=sys.argv[2])
diff --git a/storage/samples/snippets/storage_set_metadata.py b/storage/samples/snippets/storage_set_metadata.py
new file mode 100644
index 00000000000..6a4a9fb9e08
--- /dev/null
+++ b/storage/samples/snippets/storage_set_metadata.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+# Copyright 2020 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_set_metadata]
+from google.cloud import storage
+
+
+def set_blob_metadata(bucket_name, blob_name):
+    """Set a blob's metadata.
+
+    Args:
+        bucket_name: Name of the bucket containing the object.
+        blob_name: Name of the object whose custom metadata is set.
+    """
+    # bucket_name = 'your-bucket-name'
+    # blob_name = 'your-object-name'
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+    # NOTE(review): get_blob fetches the object's metadata, which is
+    # presumably why no explicit reload() is needed before reading
+    # blob.metageneration below — confirm against the client docs.
+    blob = bucket.get_blob(blob_name)
+    metageneration_match_precondition = None
+
+    # Optional: set a metageneration-match precondition to avoid potential race
+    # conditions and data corruptions. The request to patch is aborted if the
+    # object's metageneration does not match your precondition.
+    metageneration_match_precondition = blob.metageneration
+
+    metadata = {'color': 'Red', 'name': 'Test'}
+    blob.metadata = metadata
+    blob.patch(if_metageneration_match=metageneration_match_precondition)
+
+    print(f"The metadata for the blob {blob.name} is {blob.metadata}")
+
+
+# [END storage_set_metadata]
+
+if __name__ == "__main__":
+    set_blob_metadata(bucket_name=sys.argv[1], blob_name=sys.argv[2])
diff --git a/storage/samples/snippets/storage_set_object_retention_policy.py b/storage/samples/snippets/storage_set_object_retention_policy.py
new file mode 100644
index 00000000000..d0d3a54ec50
--- /dev/null
+++ b/storage/samples/snippets/storage_set_object_retention_policy.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import sys
+
+# [START storage_set_object_retention_policy]
+from google.cloud import storage
+
+
+def set_object_retention_policy(bucket_name, contents, destination_blob_name):
+    """Set the object retention policy of a file.
+
+    Args:
+        bucket_name: Name of the bucket to upload into.
+        contents: String contents uploaded as the new object.
+        destination_blob_name: Name of the object that is created.
+    """
+
+    # The ID of your GCS bucket
+    # bucket_name = "your-bucket-name"
+
+    # The contents to upload to the file
+    # contents = "these are my contents"
+
+    # The ID of your GCS object
+    # destination_blob_name = "storage-object-name"
+
+    storage_client = storage.Client()
+    bucket = storage_client.bucket(bucket_name)
+    blob = bucket.blob(destination_blob_name)
+    # The object is created first; the retention policy is applied to it below.
+    blob.upload_from_string(contents)
+
+    # Set the retention policy for the file.
+    blob.retention.mode = "Unlocked"
+    retention_date = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=10)
+    blob.retention.retain_until_time = retention_date
+    blob.patch()
+    print(
+        f"Retention policy for file {destination_blob_name} was set to: {blob.retention.mode}."
+    )
+
+    # To modify an existing policy on an unlocked file object, pass in the override parameter.
+    # Here the retain-until time is shortened by one day (10 days -> 9 days).
+    new_retention_date = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=9)
+    blob.retention.retain_until_time = new_retention_date
+    blob.patch(override_unlocked_retention=True)
+    print(
+        f"Retention policy for file {destination_blob_name} was updated to: {blob.retention.retain_until_time}."
+    )
+
+
+# [END storage_set_object_retention_policy]
+
+
+if __name__ == "__main__":
+    set_object_retention_policy(
+        bucket_name=sys.argv[1],
+        contents=sys.argv[2],
+        destination_blob_name=sys.argv[3],
+    )
diff --git a/storage/samples/snippets/storage_set_public_access_prevention_enforced.py b/storage/samples/snippets/storage_set_public_access_prevention_enforced.py
new file mode 100644
index 00000000000..59ce5ce56ef
--- /dev/null
+++ b/storage/samples/snippets/storage_set_public_access_prevention_enforced.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_set_public_access_prevention_enforced]
+from google.cloud import storage
+from google.cloud.storage.constants import PUBLIC_ACCESS_PREVENTION_ENFORCED
+
+
+def set_public_access_prevention_enforced(bucket_name):
+ """Enforce public access prevention for a bucket."""
+ # The ID of your GCS bucket
+ # bucket_name = "my-bucket"
+
+ storage_client = storage.Client()
+ bucket = storage_client.get_bucket(bucket_name)
+
+ bucket.iam_configuration.public_access_prevention = (
+ PUBLIC_ACCESS_PREVENTION_ENFORCED
+ )
+ bucket.patch()
+
+ print(f"Public access prevention is set to enforced for {bucket.name}.")
+
+
+# [END storage_set_public_access_prevention_enforced]
+
+if __name__ == "__main__":
+ set_public_access_prevention_enforced(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_set_public_access_prevention_inherited.py b/storage/samples/snippets/storage_set_public_access_prevention_inherited.py
new file mode 100644
index 00000000000..97e218f9d0a
--- /dev/null
+++ b/storage/samples/snippets/storage_set_public_access_prevention_inherited.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+"""Sample that sets public access prevention to inherited.
+This sample is used on this page:
+ https://cloud.google.com/storage/docs/using-public-access-prevention
+For more information, see README.md.
+"""
+
+# [START storage_set_public_access_prevention_inherited]
+
+from google.cloud import storage
+from google.cloud.storage.constants import PUBLIC_ACCESS_PREVENTION_INHERITED
+
+
+def set_public_access_prevention_inherited(bucket_name):
+ """Sets the public access prevention status to inherited, so that the bucket inherits its setting from its parent project."""
+ # The ID of your GCS bucket
+ # bucket_name = "my-bucket"
+
+ storage_client = storage.Client()
+ bucket = storage_client.get_bucket(bucket_name)
+
+ bucket.iam_configuration.public_access_prevention = (
+ PUBLIC_ACCESS_PREVENTION_INHERITED
+ )
+ bucket.patch()
+
+ print(f"Public access prevention is 'inherited' for {bucket.name}.")
+
+
+# [END storage_set_public_access_prevention_inherited]
+
+if __name__ == "__main__":
+ set_public_access_prevention_inherited(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_set_retention_policy.py b/storage/samples/snippets/storage_set_retention_policy.py
new file mode 100644
index 00000000000..2b36024919a
--- /dev/null
+++ b/storage/samples/snippets/storage_set_retention_policy.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_set_retention_policy]
+from google.cloud import storage
+
+
+def set_retention_policy(bucket_name, retention_period):
+ """Defines a retention policy on a given bucket"""
+ # bucket_name = "my-bucket"
+ # retention_period = 10
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+
+ bucket.retention_period = retention_period
+ bucket.patch()
+
+ print(
+ "Bucket {} retention period set for {} seconds".format(
+ bucket.name, bucket.retention_period
+ )
+ )
+
+
+# [END storage_set_retention_policy]
+
+
+if __name__ == "__main__":
+ set_retention_policy(bucket_name=sys.argv[1], retention_period=sys.argv[2])
diff --git a/storage/samples/snippets/storage_set_rpo_async_turbo.py b/storage/samples/snippets/storage_set_rpo_async_turbo.py
new file mode 100644
index 00000000000..a351cb8f82e
--- /dev/null
+++ b/storage/samples/snippets/storage_set_rpo_async_turbo.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+"""Sample that sets RPO (Recovery Point Objective) to ASYNC_TURBO
+This sample is used on this page:
+ https://cloud.google.com/storage/docs/managing-turbo-replication
+For more information, see README.md.
+"""
+
+# [START storage_set_rpo_async_turbo]
+
+from google.cloud import storage
+from google.cloud.storage.constants import RPO_ASYNC_TURBO
+
+
+def set_rpo_async_turbo(bucket_name):
+ """Sets the RPO to ASYNC_TURBO, enabling the turbo replication feature"""
+ # The ID of your GCS bucket
+ # bucket_name = "my-bucket"
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+
+ bucket.rpo = RPO_ASYNC_TURBO
+ bucket.patch()
+
+ print(f"RPO is set to ASYNC_TURBO for {bucket.name}.")
+
+
+# [END storage_set_rpo_async_turbo]
+
+if __name__ == "__main__":
+ set_rpo_async_turbo(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_set_rpo_default.py b/storage/samples/snippets/storage_set_rpo_default.py
new file mode 100644
index 00000000000..883fee0c972
--- /dev/null
+++ b/storage/samples/snippets/storage_set_rpo_default.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+"""Sample that sets the replication behavior or recovery point objective (RPO) to default.
+This sample is used on this page:
+ https://cloud.google.com/storage/docs/managing-turbo-replication
+For more information, see README.md.
+"""
+
+# [START storage_set_rpo_default]
+
+from google.cloud import storage
+from google.cloud.storage.constants import RPO_DEFAULT
+
+
+def set_rpo_default(bucket_name):
+ """Sets the RPO to DEFAULT, disabling the turbo replication feature"""
+ # The ID of your GCS bucket
+ # bucket_name = "my-bucket"
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+
+ bucket.rpo = RPO_DEFAULT
+ bucket.patch()
+
+ print(f"RPO is set to DEFAULT for {bucket.name}.")
+
+
+# [END storage_set_rpo_default]
+
+if __name__ == "__main__":
+ set_rpo_default(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/storage_set_soft_delete_policy.py b/storage/samples/snippets/storage_set_soft_delete_policy.py
new file mode 100644
index 00000000000..26bc5943664
--- /dev/null
+++ b/storage/samples/snippets/storage_set_soft_delete_policy.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_set_soft_delete_policy]
+from google.cloud import storage
+
+
+def set_soft_delete_policy(bucket_name, duration_in_seconds):
+ """Sets a soft-delete policy on the bucket"""
+ # bucket_name = "your-bucket-name"
+ # duration_in_seconds = "your-soft-delete-retention-duration-in-seconds"
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+
+ bucket.soft_delete_policy.retention_duration_seconds = duration_in_seconds
+ bucket.patch()
+
+ print(
+ f"Soft delete policy for bucket {bucket_name} was set to {duration_in_seconds} seconds retention period"
+ )
+
+
+# [END storage_set_soft_delete_policy]
+
+if __name__ == "__main__":
+ set_soft_delete_policy(bucket_name=sys.argv[1], duration_in_seconds=sys.argv[2])
diff --git a/storage/samples/snippets/storage_set_temporary_hold.py b/storage/samples/snippets/storage_set_temporary_hold.py
new file mode 100644
index 00000000000..a91521bcc11
--- /dev/null
+++ b/storage/samples/snippets/storage_set_temporary_hold.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_set_temporary_hold]
+from google.cloud import storage
+
+
+def set_temporary_hold(bucket_name, blob_name):
+ """Sets a temporary hold on a given blob"""
+ # bucket_name = "my-bucket"
+ # blob_name = "my-blob"
+
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+ blob = bucket.blob(blob_name)
+ metageneration_match_precondition = None
+
+ # Optional: set a metageneration-match precondition to avoid potential race
+ # conditions and data corruptions. The request to patch is aborted if the
+ # object's metageneration does not match your precondition.
+ blob.reload() # Fetch blob metadata to use in metageneration_match_precondition.
+ metageneration_match_precondition = blob.metageneration
+
+ blob.temporary_hold = True
+ blob.patch(if_metageneration_match=metageneration_match_precondition)
+
+ print("Temporary hold was set for #{blob_name}")
+
+
+# [END storage_set_temporary_hold]
+
+
+if __name__ == "__main__":
+ set_temporary_hold(bucket_name=sys.argv[1], blob_name=sys.argv[2])
diff --git a/storage/samples/snippets/storage_trace_quickstart.py b/storage/samples/snippets/storage_trace_quickstart.py
new file mode 100644
index 00000000000..322edc24051
--- /dev/null
+++ b/storage/samples/snippets/storage_trace_quickstart.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+
+# Copyright 2024 Google LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+"""
+Sample that exports OpenTelemetry Traces collected from the Storage client to Cloud Trace.
+"""
+
+
+def run_quickstart(bucket_name, blob_name, data):
+ # [START storage_enable_otel_tracing]
+
+ from opentelemetry import trace
+ from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
+ from opentelemetry.resourcedetector.gcp_resource_detector import (
+ GoogleCloudResourceDetector,
+ )
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
+ from opentelemetry.sdk.trace.sampling import ALWAYS_ON
+ # Optional: Enable traces emitted from the requests HTTP library.
+ from opentelemetry.instrumentation.requests import RequestsInstrumentor
+
+ from google.cloud import storage
+
+ # The ID of your GCS bucket
+ # bucket_name = "your-bucket-name"
+ # The ID of your GCS object
+ # blob_name = "your-object-name"
+ # The contents to upload to the file
+ # data = "The quick brown fox jumps over the lazy dog."
+
+ # In this sample, we use Google Cloud Trace to export the OpenTelemetry
+ # traces: https://cloud.google.com/trace/docs/setup/python-ot
+ # Choose and configure the exporter for your environment.
+
+ tracer_provider = TracerProvider(
+ # Sampling is set to ALWAYS_ON.
+ # It is recommended to sample based on a ratio to control trace ingestion volume,
+ # for instance, sampler=TraceIdRatioBased(0.2)
+ sampler=ALWAYS_ON,
+ resource=GoogleCloudResourceDetector().detect(),
+ )
+
+ # Export to Google Cloud Trace.
+ tracer_provider.add_span_processor(BatchSpanProcessor(CloudTraceSpanExporter()))
+ trace.set_tracer_provider(tracer_provider)
+
+ # Optional: Enable traces emitted from the requests HTTP library.
+ RequestsInstrumentor().instrument(tracer_provider=tracer_provider)
+
+ # Get the tracer and create a new root span.
+ tracer = tracer_provider.get_tracer("My App")
+ with tracer.start_as_current_span("trace-quickstart"):
+ # Instantiate a storage client and perform a write and read workload.
+ storage_client = storage.Client()
+ bucket = storage_client.bucket(bucket_name)
+ blob = bucket.blob(blob_name)
+ blob.upload_from_string(data)
+ print(f"{blob_name} uploaded to {bucket_name}.")
+
+ blob.download_as_bytes()
+ print("Downloaded storage object {} from bucket {}.".format(blob_name, bucket_name))
+
+ # [END storage_enable_otel_tracing]
+
+
+if __name__ == "__main__":
+ run_quickstart(bucket_name=sys.argv[1], blob_name=sys.argv[2], data=sys.argv[3])
diff --git a/storage/samples/snippets/storage_transfer_manager_download_bucket.py b/storage/samples/snippets/storage_transfer_manager_download_bucket.py
new file mode 100644
index 00000000000..5d94a67aeea
--- /dev/null
+++ b/storage/samples/snippets/storage_transfer_manager_download_bucket.py
@@ -0,0 +1,75 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START storage_transfer_manager_download_bucket]
+def download_bucket_with_transfer_manager(
+    bucket_name, destination_directory="", workers=8, max_results=1000
+):
+    """Download all of the blobs in a bucket, concurrently in a process pool.
+
+    The filename of each blob once downloaded is derived from the blob name and
+    the `destination_directory` parameter. For complete control of the filename
+    of each blob, use transfer_manager.download_many() instead.
+
+    Directories will be created automatically as needed, for instance to
+    accommodate blob names that include slashes.
+    """
+
+    # The ID of your GCS bucket
+    # bucket_name = "your-bucket-name"
+
+    # The directory on your computer to which to download all of the files. This
+    # string is prepended (with os.path.join()) to the name of each blob to form
+    # the full path. Relative paths and absolute paths are both accepted. An
+    # empty string means "the current working directory". Note that this
+    # parameter accepts directory traversal ("../" etc.) and is not
+    # intended for unsanitized end user input.
+    # destination_directory = ""
+
+    # The maximum number of processes to use for the operation. The performance
+    # impact of this value depends on the use case, but smaller files usually
+    # benefit from a higher number of processes. Each additional process occupies
+    # some CPU and memory resources until finished. Threads can be used instead
+    # of processes by passing `worker_type=transfer_manager.THREAD`.
+    # workers=8
+
+    # The maximum number of results to fetch from bucket.list_blobs(). This
+    # sample code fetches all of the blobs up to max_results and queues them all
+    # for download at once. Though they will still be executed in batches up to
+    # the processes limit, queueing them all at once can be taxing on system
+    # memory if buckets are very large. Adjust max_results as needed for your
+    # system environment, or set it to None if you are sure the bucket is not
+    # too large to hold in memory easily.
+    # max_results=1000
+
+    from google.cloud.storage import Client, transfer_manager
+
+    storage_client = Client()
+    bucket = storage_client.bucket(bucket_name)
+
+    blob_names = [blob.name for blob in bucket.list_blobs(max_results=max_results)]
+
+    results = transfer_manager.download_many_to_path(
+        bucket, blob_names, destination_directory=destination_directory, max_workers=workers
+    )
+
+    for name, result in zip(blob_names, results):
+        # The results list contains either `None` or an exception for each blob
+        # in the input list, in order.
+
+        if isinstance(result, Exception):
+            print("Failed to download {} due to exception: {}".format(name, result))
+        else:
+            print("Downloaded {} to {}.".format(name, destination_directory + name))
+# [END storage_transfer_manager_download_bucket]
diff --git a/storage/samples/snippets/storage_transfer_manager_download_chunks_concurrently.py b/storage/samples/snippets/storage_transfer_manager_download_chunks_concurrently.py
new file mode 100644
index 00000000000..b6ac9982d61
--- /dev/null
+++ b/storage/samples/snippets/storage_transfer_manager_download_chunks_concurrently.py
@@ -0,0 +1,55 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START storage_transfer_manager_download_chunks_concurrently]
+def download_chunks_concurrently(
+    bucket_name, blob_name, filename, chunk_size=32 * 1024 * 1024, workers=8
+):
+    """Download a single file in chunks, concurrently in a process pool.
+
+    The object is fetched in `chunk_size`-byte pieces in parallel and written
+    to `filename` on the local filesystem.
+    """
+
+    # The ID of your GCS bucket
+    # bucket_name = "your-bucket-name"
+
+    # The file to be downloaded
+    # blob_name = "target-file"
+
+    # The destination filename or path
+    # filename = ""
+
+    # The size of each chunk. The performance impact of this value depends on
+    # the use case. The remote service has a minimum of 5 MiB and a maximum of
+    # 5 GiB.
+    # chunk_size = 32 * 1024 * 1024 (32 MiB)
+
+    # The maximum number of processes to use for the operation. The performance
+    # impact of this value depends on the use case, but smaller files usually
+    # benefit from a higher number of processes. Each additional process occupies
+    # some CPU and memory resources until finished. Threads can be used instead
+    # of processes by passing `worker_type=transfer_manager.THREAD`.
+    # workers=8
+
+    from google.cloud.storage import Client, transfer_manager
+
+    storage_client = Client()
+    bucket = storage_client.bucket(bucket_name)
+    blob = bucket.blob(blob_name)
+
+    transfer_manager.download_chunks_concurrently(
+        blob, filename, chunk_size=chunk_size, max_workers=workers
+    )
+
+    print("Downloaded {} to {}.".format(blob_name, filename))
+
+
+# [END storage_transfer_manager_download_chunks_concurrently]
diff --git a/storage/samples/snippets/storage_transfer_manager_download_many.py b/storage/samples/snippets/storage_transfer_manager_download_many.py
new file mode 100644
index 00000000000..447d0869c5b
--- /dev/null
+++ b/storage/samples/snippets/storage_transfer_manager_download_many.py
@@ -0,0 +1,126 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Example usage:
+# python samples/snippets/storage_transfer_manager_download_many.py \
+# --bucket_name \
+# --blobs \
+# --destination_directory \
+# --blob_name_prefix
+
+
+# [START storage_transfer_manager_download_many]
+def download_many_blobs_with_transfer_manager(
+    bucket_name, blob_names, destination_directory="", blob_name_prefix="", workers=8
+):
+    """Download blobs in a list by name, concurrently in a process pool.
+
+    The filename of each blob once downloaded is derived from the blob name and
+    the `destination_directory` parameter. For complete control of the filename
+    of each blob, use transfer_manager.download_many() instead.
+
+    Each source blob name is formed by prepending `blob_name_prefix` to the
+    corresponding entry of `blob_names`.
+
+    Directories will be created automatically as needed to accommodate blob
+    names that include slashes.
+    """
+
+    # The ID of your GCS bucket
+    # bucket_name = "your-bucket-name"
+
+    # The list of blob names to download. The name of each blob will also
+    # be the name of each destination file (use transfer_manager.download_many()
+    # instead to control each destination file name). If there is a "/" in the
+    # blob name, then corresponding directories will be created on download.
+    # blob_names = ["myblob", "myblob2"]
+
+    # The directory on your computer to which to download all of the files. This
+    # string is prepended to the name of each blob to form the full path using
+    # pathlib. Relative paths and absolute paths are both accepted. An empty
+    # string means "the current working directory". Note that this parameter
+    # will NOT allow files to escape the destination_directory and will skip
+    # downloads that attempt directory traversal outside of it.
+    # destination_directory = ""
+
+    # The maximum number of processes to use for the operation. The performance
+    # impact of this value depends on the use case, but smaller files usually
+    # benefit from a higher number of processes. Each additional process occupies
+    # some CPU and memory resources until finished. Threads can be used instead
+    # of processes by passing `worker_type=transfer_manager.THREAD`.
+    # workers=8
+
+    from google.cloud.storage import Client, transfer_manager
+
+    storage_client = Client()
+    bucket = storage_client.bucket(bucket_name)
+
+    results = transfer_manager.download_many_to_path(
+        bucket,
+        blob_names,
+        destination_directory=destination_directory,
+        blob_name_prefix=blob_name_prefix,
+        max_workers=workers,
+    )
+
+    for name, result in zip(blob_names, results):
+        # The results list contains either `None`, an exception, or a warning
+        # for each blob in the input list, in order.
+        if isinstance(result, UserWarning):
+            print("Skipped download for {} due to warning: {}".format(name, result))
+        elif isinstance(result, Exception):
+            print("Failed to download {} due to exception: {}".format(name, result))
+        else:
+            print(
+                "Downloaded {} inside {} directory.".format(name, destination_directory)
+            )
+
+
+# [END storage_transfer_manager_download_many]
+
+if __name__ == "__main__":
+    # Command-line entry point; see the "Example usage" comment at the top of
+    # this file for the expected flags.
+    import argparse
+
+    parser = argparse.ArgumentParser(
+        description="Download blobs in a list by name, concurrently in a process pool."
+    )
+    parser.add_argument(
+        "--bucket_name", required=True, help="The name of your GCS bucket"
+    )
+    parser.add_argument(
+        "--blobs",
+        nargs="+",
+        required=True,
+        help="The list of blob names to download",
+    )
+    parser.add_argument(
+        "--destination_directory",
+        default="",
+        help="The directory on your computer to which to download all of the files",
+    )
+    parser.add_argument(
+        "--blob_name_prefix",
+        default="",
+        help="A string that will be prepended to each blob_name to determine the source blob name",
+    )
+    parser.add_argument(
+        "--workers", type=int, default=8, help="The maximum number of processes to use"
+    )
+
+    args = parser.parse_args()
+
+    download_many_blobs_with_transfer_manager(
+        bucket_name=args.bucket_name,
+        blob_names=args.blobs,
+        destination_directory=args.destination_directory,
+        blob_name_prefix=args.blob_name_prefix,
+        workers=args.workers,
+    )
diff --git a/storage/samples/snippets/storage_transfer_manager_upload_chunks_concurrently.py b/storage/samples/snippets/storage_transfer_manager_upload_chunks_concurrently.py
new file mode 100644
index 00000000000..a4abd13b98b
--- /dev/null
+++ b/storage/samples/snippets/storage_transfer_manager_upload_chunks_concurrently.py
@@ -0,0 +1,95 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+
+
+# [START storage_transfer_manager_upload_chunks_concurrently]
+def upload_chunks_concurrently(
+ bucket_name,
+ source_filename,
+ destination_blob_name,
+ chunk_size=32 * 1024 * 1024,
+ workers=8,
+):
+ """Upload a single file, in chunks, concurrently in a process pool."""
+ # The ID of your GCS bucket
+ # bucket_name = "your-bucket-name"
+
+ # The path to your file to upload
+ # source_filename = "local/path/to/file"
+
+ # The ID of your GCS object
+ # destination_blob_name = "storage-object-name"
+
+ # The size of each chunk. The performance impact of this value depends on
+ # the use case. The remote service has a minimum of 5 MiB and a maximum of
+ # 5 GiB.
+ # chunk_size = 32 * 1024 * 1024 (32 MiB)
+
+ # The maximum number of processes to use for the operation. The performance
+ # impact of this value depends on the use case. Each additional process
+ # occupies some CPU and memory resources until finished. Threads can be used
+ # instead of processes by passing `worker_type=transfer_manager.THREAD`.
+ # workers=8
+
+ from google.cloud.storage import Client, transfer_manager
+
+ storage_client = Client()
+ bucket = storage_client.bucket(bucket_name)
+ blob = bucket.blob(destination_blob_name)
+
+ transfer_manager.upload_chunks_concurrently(
+ source_filename, blob, chunk_size=chunk_size, max_workers=workers
+ )
+
+ print(f"File {source_filename} uploaded to {destination_blob_name}.")
+
+
+if __name__ == "__main__":
+ argparse = argparse.ArgumentParser(
+ description="Upload a file to GCS in chunks concurrently."
+ )
+ argparse.add_argument(
+ "--bucket_name", help="The name of the GCS bucket to upload to."
+ )
+ argparse.add_argument(
+ "--source_filename", help="The local path to the file to upload."
+ )
+ argparse.add_argument(
+ "--destination_blob_name", help="The name of the object in GCS."
+ )
+ argparse.add_argument(
+ "--chunk_size",
+ type=int,
+ default=32 * 1024 * 1024,
+ help="The size of each chunk in bytes (default: 32 MiB). The remote\
+ service has a minimum of 5 MiB and a maximum of 5 GiB",
+ )
+ argparse.add_argument(
+ "--workers",
+ type=int,
+ default=8,
+ help="The number of worker processes to use (default: 8).",
+ )
+ args = argparse.parse_args()
+ upload_chunks_concurrently(
+ args.bucket_name,
+ args.source_filename,
+ args.destination_blob_name,
+ args.chunk_size,
+ args.workers,
+ )
+
+
+# [END storage_transfer_manager_upload_chunks_concurrently]
diff --git a/storage/samples/snippets/storage_transfer_manager_upload_directory.py b/storage/samples/snippets/storage_transfer_manager_upload_directory.py
new file mode 100644
index 00000000000..329ca108133
--- /dev/null
+++ b/storage/samples/snippets/storage_transfer_manager_upload_directory.py
@@ -0,0 +1,80 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START storage_transfer_manager_upload_directory]
+def upload_directory_with_transfer_manager(bucket_name, source_directory, workers=8):
+    """Upload every file in a directory, including all files in subdirectories.
+
+    Each blob name is derived from the filename, not including the
+    `source_directory` parameter itself. For complete control of the blob name
+    for each file (and other aspects of individual blob metadata), use
+    transfer_manager.upload_many() instead.
+    """
+
+    # The ID of your GCS bucket
+    # bucket_name = "your-bucket-name"
+
+    # The directory on your computer to upload. Files in the directory and its
+    # subdirectories will be uploaded. An empty string means "the current
+    # working directory".
+    # source_directory=""
+
+    # The maximum number of processes to use for the operation. The performance
+    # impact of this value depends on the use case, but smaller files usually
+    # benefit from a higher number of processes. Each additional process occupies
+    # some CPU and memory resources until finished. Threads can be used instead
+    # of processes by passing `worker_type=transfer_manager.THREAD`.
+    # workers=8
+
+    from pathlib import Path
+
+    from google.cloud.storage import Client, transfer_manager
+
+    storage_client = Client()
+    bucket = storage_client.bucket(bucket_name)
+
+    # Generate a list of paths (in string form) relative to `source_directory`.
+    # This can be done in a single list comprehension, but is expanded into
+    # multiple lines here for clarity.
+
+    # First, recursively get all files in `source_directory` as Path objects.
+    directory_as_path_obj = Path(source_directory)
+    paths = directory_as_path_obj.rglob("*")
+
+    # Filter so the list only includes files, not directories themselves.
+    file_paths = [path for path in paths if path.is_file()]
+
+    # These paths are relative to the current working directory. Next, make them
+    # relative to `source_directory`
+    relative_paths = [path.relative_to(source_directory) for path in file_paths]
+
+    # Finally, convert them all to strings.
+    string_paths = [str(path) for path in relative_paths]
+
+    print("Found {} files.".format(len(string_paths)))
+
+    # Start the upload.
+    results = transfer_manager.upload_many_from_filenames(
+        bucket, string_paths, source_directory=source_directory, max_workers=workers
+    )
+
+    for name, result in zip(string_paths, results):
+        # The results list contains either `None` or an exception for each
+        # filename in the input list, in order.
+
+        if isinstance(result, Exception):
+            print("Failed to upload {} due to exception: {}".format(name, result))
+        else:
+            print("Uploaded {} to {}.".format(name, bucket.name))
+# [END storage_transfer_manager_upload_directory]
diff --git a/storage/samples/snippets/storage_transfer_manager_upload_many.py b/storage/samples/snippets/storage_transfer_manager_upload_many.py
new file mode 100644
index 00000000000..1b9b9fc8983
--- /dev/null
+++ b/storage/samples/snippets/storage_transfer_manager_upload_many.py
@@ -0,0 +1,67 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START storage_transfer_manager_upload_many]
+def upload_many_blobs_with_transfer_manager(
+ bucket_name, filenames, source_directory="", workers=8
+):
+ """Upload every file in a list to a bucket, concurrently in a process pool.
+
+ Each blob name is derived from the filename, not including the
+ `source_directory` parameter. For complete control of the blob name for each
+ file (and other aspects of individual blob metadata), use
+ transfer_manager.upload_many() instead.
+ """
+
+ # The ID of your GCS bucket
+ # bucket_name = "your-bucket-name"
+
+ # A list (or other iterable) of filenames to upload.
+ # filenames = ["file_1.txt", "file_2.txt"]
+
+ # The directory on your computer that is the root of all of the files in the
+ # list of filenames. This string is prepended (with os.path.join()) to each
+ # filename to get the full path to the file. Relative paths and absolute
+ # paths are both accepted. This string is not included in the name of the
+ # uploaded blob; it is only used to find the source files. An empty string
+ # means "the current working directory". Note that this parameter allows
+ # directory traversal (e.g. "/", "../") and is not intended for unsanitized
+ # end user input.
+ # source_directory=""
+
+ # The maximum number of processes to use for the operation. The performance
+ # impact of this value depends on the use case, but smaller files usually
+ # benefit from a higher number of processes. Each additional process occupies
+ # some CPU and memory resources until finished. Threads can be used instead
+ # of processes by passing `worker_type=transfer_manager.THREAD`.
+ # workers=8
+
+ from google.cloud.storage import Client, transfer_manager
+
+ storage_client = Client()
+ bucket = storage_client.bucket(bucket_name)
+
+ results = transfer_manager.upload_many_from_filenames(
+ bucket, filenames, source_directory=source_directory, max_workers=workers
+ )
+
+ for name, result in zip(filenames, results):
+ # The results list is either `None` or an exception for each filename in
+ # the input list, in order.
+
+ if isinstance(result, Exception):
+ print("Failed to upload {} due to exception: {}".format(name, result))
+ else:
+ print("Uploaded {} to {}.".format(name, bucket.name))
+# [END storage_transfer_manager_upload_many]
diff --git a/storage/samples/snippets/storage_update_bucket_encryption_enforcement_config.py b/storage/samples/snippets/storage_update_bucket_encryption_enforcement_config.py
new file mode 100644
index 00000000000..9b704bc0b8d
--- /dev/null
+++ b/storage/samples/snippets/storage_update_bucket_encryption_enforcement_config.py
@@ -0,0 +1,60 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START storage_update_bucket_encryption_enforcement_config]
+from google.cloud import storage
+from google.cloud.storage.bucket import EncryptionEnforcementConfig
+
+
+def update_bucket_encryption_enforcement_config(bucket_name):
+    """Updates the encryption enforcement policy for a bucket.
+
+    Args:
+        bucket_name: The ID of the GCS bucket whose config to update.
+    """
+    # The ID of your GCS bucket with GMEK and CSEK restricted
+    # bucket_name = "your-unique-bucket-name"
+
+    storage_client = storage.Client()
+    bucket = storage_client.get_bucket(bucket_name)
+
+    # Update a specific type (e.g., change GMEK to NotRestricted)
+    bucket.encryption.google_managed_encryption_enforcement_config = (
+        EncryptionEnforcementConfig(restriction_mode="NotRestricted")
+    )
+
+    # Update another type (e.g., change CMEK to FullyRestricted)
+    bucket.encryption.customer_managed_encryption_enforcement_config = (
+        EncryptionEnforcementConfig(restriction_mode="FullyRestricted")
+    )
+
+    # Set CSEK to FullyRestricted. NOTE(review): the original comment said
+    # "Keeping CSEK unchanged", but this assignment does set the config —
+    # confirm the intended behavior.
+    bucket.encryption.customer_supplied_encryption_enforcement_config = (
+        EncryptionEnforcementConfig(restriction_mode="FullyRestricted")
+    )
+
+    bucket.patch()
+
+    print(f"Encryption enforcement policy updated for bucket {bucket.name}.")
+
+    gmek = bucket.encryption.google_managed_encryption_enforcement_config
+    cmek = bucket.encryption.customer_managed_encryption_enforcement_config
+    csek = bucket.encryption.customer_supplied_encryption_enforcement_config
+
+    print(f"GMEK restriction mode: {gmek.restriction_mode if gmek else 'None'}")
+    print(f"CMEK restriction mode: {cmek.restriction_mode if cmek else 'None'}")
+    print(f"CSEK restriction mode: {csek.restriction_mode if csek else 'None'}")
+
+
+# [END storage_update_bucket_encryption_enforcement_config]
+
+
+if __name__ == "__main__":
+    update_bucket_encryption_enforcement_config(bucket_name="your-unique-bucket-name")
diff --git a/storage/samples/snippets/storage_upload_encrypted_file.py b/storage/samples/snippets/storage_upload_encrypted_file.py
new file mode 100644
index 00000000000..08f58154e07
--- /dev/null
+++ b/storage/samples/snippets/storage_upload_encrypted_file.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# [START storage_upload_encrypted_file]
+import base64
+# [END storage_upload_encrypted_file]
+import sys
+# [START storage_upload_encrypted_file]
+
+from google.cloud import storage
+
+
def upload_encrypted_blob(
    bucket_name,
    source_file_name,
    destination_blob_name,
    base64_encryption_key,
):
    """Upload a local file as a customer-supplied-encryption-key object.

    Google Cloud Storage encrypts the object with the supplied key; the
    object is only retrievable by presenting the same encryption key.

    Args:
        bucket_name: Name of the destination bucket.
        source_file_name: Local path of the file to upload.
        destination_blob_name: Object name to create in the bucket.
        base64_encryption_key: AES-256 key, base64-encoded.
    """
    # bucket_name = "your-bucket-name"
    # source_file_name = "local/path/to/file"
    # destination_blob_name = "storage-object-name"
    # base64_encryption_key = "TIbv/fjexq+VmtXzAlc63J4z5kFmWJ6NdAPQulQBT7g="

    client = storage.Client()
    target_bucket = client.bucket(bucket_name)

    # The API expects the raw 32-byte AES-256 key, so undo the base64
    # encoding that was used to pass the key around as text.
    raw_key = base64.b64decode(base64_encryption_key)
    target_blob = target_bucket.blob(destination_blob_name, encryption_key=raw_key)

    # Optional: a generation-match precondition avoids race conditions and
    # data corruption; the upload is aborted unless the object's generation
    # matches. if_generation_match=0 means "create only, never overwrite".
    # To overwrite an existing object, pass its current generation number
    # instead.
    target_blob.upload_from_filename(source_file_name, if_generation_match=0)

    print(f"File {source_file_name} uploaded to {destination_blob_name}.")


# [END storage_upload_encrypted_file]

if __name__ == "__main__":
    upload_encrypted_blob(
        bucket_name=sys.argv[1],
        source_file_name=sys.argv[2],
        destination_blob_name=sys.argv[3],
        base64_encryption_key=sys.argv[4],
    )
diff --git a/storage/samples/snippets/storage_upload_file.py b/storage/samples/snippets/storage_upload_file.py
new file mode 100644
index 00000000000..1e7ceda5eb4
--- /dev/null
+++ b/storage/samples/snippets/storage_upload_file.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_upload_file]
+from google.cloud import storage
+
+
def upload_blob(bucket_name, source_file_name, destination_blob_name):
    """Upload a local file to a Cloud Storage bucket.

    Args:
        bucket_name: The ID of the destination GCS bucket.
        source_file_name: Local path of the file to upload.
        destination_blob_name: The ID of the GCS object to create.
    """
    client = storage.Client()
    target = client.bucket(bucket_name).blob(destination_blob_name)

    # Optional: a generation-match precondition avoids race conditions and
    # data corruption; the upload is aborted unless the object's generation
    # matches. if_generation_match=0 means the object must not exist yet.
    # To replace an existing object, use its current generation number
    # instead.
    target.upload_from_filename(source_file_name, if_generation_match=0)

    print(f"File {source_file_name} uploaded to {destination_blob_name}.")


# [END storage_upload_file]

if __name__ == "__main__":
    upload_blob(
        bucket_name=sys.argv[1],
        source_file_name=sys.argv[2],
        destination_blob_name=sys.argv[3],
    )
diff --git a/storage/samples/snippets/storage_upload_from_memory.py b/storage/samples/snippets/storage_upload_from_memory.py
new file mode 100644
index 00000000000..eff3d222afd
--- /dev/null
+++ b/storage/samples/snippets/storage_upload_from_memory.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_file_upload_from_memory]
+from google.cloud import storage
+
+
def upload_blob_from_memory(bucket_name, contents, destination_blob_name):
    """Upload in-memory data as a new Cloud Storage object.

    Args:
        bucket_name: The ID of the destination GCS bucket.
        contents: The data to write into the new object.
        destination_blob_name: The ID of the GCS object to create.
    """
    # Example values:
    # bucket_name = "your-bucket-name"
    # contents = "these are my contents"
    # destination_blob_name = "storage-object-name"

    client = storage.Client()
    destination = client.bucket(bucket_name).blob(destination_blob_name)

    # upload_from_string writes the payload directly, no local file needed.
    destination.upload_from_string(contents)

    print(
        f"{destination_blob_name} with contents {contents} uploaded to {bucket_name}."
    )

# [END storage_file_upload_from_memory]


if __name__ == "__main__":
    upload_blob_from_memory(
        bucket_name=sys.argv[1],
        contents=sys.argv[2],
        destination_blob_name=sys.argv[3],
    )
diff --git a/storage/samples/snippets/storage_upload_from_stream.py b/storage/samples/snippets/storage_upload_from_stream.py
new file mode 100644
index 00000000000..08eb2588907
--- /dev/null
+++ b/storage/samples/snippets/storage_upload_from_stream.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START storage_stream_file_upload]
+from google.cloud import storage
+
+
def upload_blob_from_stream(bucket_name, file_obj, destination_blob_name):
    """Upload bytes from a stream or other file-like object to a blob.

    Args:
        bucket_name: The ID of the destination GCS bucket.
        file_obj: A readable, seekable file-like object; it is rewound to
            position 0 before the upload.
        destination_blob_name: The desired name of the uploaded object.
    """
    # Example stream:
    # import io
    # file_obj = io.BytesIO()
    # file_obj.write(b"This is test data.")

    client = storage.Client()
    destination = client.bucket(bucket_name).blob(destination_blob_name)

    # Rewind so the entire stream is uploaded. Skip this only if the input
    # stream is guaranteed to already be at the correct position.
    file_obj.seek(0)

    # Stream the data into the bucket.
    destination.upload_from_file(file_obj)

    print(
        f"Stream data uploaded to {destination_blob_name} in bucket {bucket_name}."
    )

# [END storage_stream_file_upload]
diff --git a/storage/samples/snippets/storage_upload_with_kms_key.py b/storage/samples/snippets/storage_upload_with_kms_key.py
new file mode 100644
index 00000000000..6e8fe039404
--- /dev/null
+++ b/storage/samples/snippets/storage_upload_with_kms_key.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_upload_with_kms_key]
+from google.cloud import storage
+
+
def upload_blob_with_kms(
    bucket_name, source_file_name, destination_blob_name, kms_key_name,
):
    """Uploads a file to the bucket, encrypting it with the given KMS key.

    Args:
        bucket_name: Name of the destination bucket.
        source_file_name: Local path of the file to upload.
        destination_blob_name: Object name to create in the bucket.
        kms_key_name: Full resource name of the Cloud KMS key to use.
    """
    # bucket_name = "your-bucket-name"
    # source_file_name = "local/path/to/file"
    # destination_blob_name = "storage-object-name"
    # kms_key_name = "projects/PROJ/locations/LOC/keyRings/RING/cryptoKeys/KEY"

    storage_client = storage.Client()
    bucket = storage_client.bucket(bucket_name)
    # Associating the KMS key with the blob makes GCS encrypt it with that
    # key on upload.
    blob = bucket.blob(destination_blob_name, kms_key_name=kms_key_name)

    # Optional: set a generation-match precondition to avoid potential race conditions
    # and data corruptions. The request to upload is aborted if the object's
    # generation number does not match your precondition. For a destination
    # object that does not yet exist, set the if_generation_match precondition to 0.
    # If the destination object already exists in your bucket, set instead a
    # generation-match precondition using its generation number.
    generation_match_precondition = 0

    blob.upload_from_filename(source_file_name, if_generation_match=generation_match_precondition)

    print(
        "File {} uploaded to {} with encryption key {}.".format(
            source_file_name, destination_blob_name, kms_key_name
        )
    )


# [END storage_upload_with_kms_key]

if __name__ == "__main__":
    upload_blob_with_kms(
        bucket_name=sys.argv[1],
        source_file_name=sys.argv[2],
        destination_blob_name=sys.argv[3],
        kms_key_name=sys.argv[4],
    )
diff --git a/storage/samples/snippets/storage_view_bucket_iam_members.py b/storage/samples/snippets/storage_view_bucket_iam_members.py
new file mode 100644
index 00000000000..184a1361f0f
--- /dev/null
+++ b/storage/samples/snippets/storage_view_bucket_iam_members.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+# [START storage_view_bucket_iam_members]
+from google.cloud import storage
+
+
def view_bucket_iam_members(bucket_name):
    """Print every role binding in a bucket's IAM policy.

    Args:
        bucket_name: The ID of the GCS bucket to inspect.
    """
    client = storage.Client()
    target_bucket = client.bucket(bucket_name)

    # Request policy version 3 — presumably so conditional role bindings
    # are returned as-is; confirm against the IAM docs.
    policy = target_bucket.get_iam_policy(requested_policy_version=3)

    for binding in policy.bindings:
        print(f"Role: {binding['role']}, Members: {binding['members']}")


# [END storage_view_bucket_iam_members]


if __name__ == "__main__":
    view_bucket_iam_members(bucket_name=sys.argv[1])
diff --git a/storage/samples/snippets/uniform_bucket_level_access_test.py b/storage/samples/snippets/uniform_bucket_level_access_test.py
new file mode 100644
index 00000000000..8b7964038ac
--- /dev/null
+++ b/storage/samples/snippets/uniform_bucket_level_access_test.py
@@ -0,0 +1,52 @@
+# Copyright 2019 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import storage_disable_uniform_bucket_level_access
+import storage_enable_uniform_bucket_level_access
+import storage_get_uniform_bucket_level_access
+
+
def test_get_uniform_bucket_level_access(bucket, capsys):
    """Reports that uniform bucket-level access starts out disabled."""
    storage_get_uniform_bucket_level_access.get_uniform_bucket_level_access(bucket.name)
    captured, _ = capsys.readouterr()
    expected = f"Uniform bucket-level access is disabled for {bucket.name}."
    assert expected in captured


def test_enable_uniform_bucket_level_access(bucket, capsys):
    """Enables uniform bucket-level access and checks the confirmation."""
    storage_enable_uniform_bucket_level_access.enable_uniform_bucket_level_access(bucket.name)
    captured, _ = capsys.readouterr()
    expected = f"Uniform bucket-level access was enabled for {bucket.name}."
    assert expected in captured


def test_disable_uniform_bucket_level_access(bucket, capsys):
    """Disables uniform bucket-level access and checks the confirmation."""
    storage_disable_uniform_bucket_level_access.disable_uniform_bucket_level_access(bucket.name)
    captured, _ = capsys.readouterr()
    expected = f"Uniform bucket-level access was disabled for {bucket.name}."
    assert expected in captured
diff --git a/storage/samples/snippets/zonal_buckets/README.md b/storage/samples/snippets/zonal_buckets/README.md
new file mode 100644
index 00000000000..71c17e5c3f1
--- /dev/null
+++ b/storage/samples/snippets/zonal_buckets/README.md
@@ -0,0 +1,78 @@
+# Google Cloud Storage - Zonal Buckets Snippets
+
+This directory contains snippets for interacting with Google Cloud Storage zonal buckets.
+
+## Prerequisites
+
+- A Google Cloud Platform project with the Cloud Storage API enabled.
+- A zonal Google Cloud Storage bucket.
+
+## Running the snippets
+
+### Create and write to an appendable object
+
+This snippet uploads an appendable object to a zonal bucket.
+
+```bash
+python samples/snippets/zonal_buckets/storage_create_and_write_appendable_object.py --bucket_name <BUCKET_NAME> --object_name <OBJECT_NAME>
+```
+
+### Finalize an appendable object upload
+
+This snippet creates, writes to, and finalizes an appendable object.
+
+```bash
+python samples/snippets/zonal_buckets/storage_finalize_appendable_object_upload.py --bucket_name --object_name
+```
+
+### Pause and resume an appendable object upload
+
+This snippet demonstrates pausing and resuming an appendable object upload.
+
+```bash
+python samples/snippets/zonal_buckets/storage_pause_and_resume_appendable_upload.py --bucket_name --object_name
+```
+
+### Tail an appendable object
+
+This snippet demonstrates tailing an appendable GCS object, similar to `tail -f`.
+
+```bash
+python samples/snippets/zonal_buckets/storage_read_appendable_object_tail.py --bucket_name <BUCKET_NAME> --object_name <OBJECT_NAME> --duration <SECONDS>
+```
+
+
+### Download a range of bytes from an object
+
+This snippet downloads a range of bytes from an object.
+
+```bash
+python samples/snippets/zonal_buckets/storage_open_object_single_ranged_read.py --bucket_name --object_name --start_byte --size
+```
+
+
+### Download multiple ranges of bytes from a single object
+
+This snippet downloads multiple ranges of bytes from a single object into different buffers.
+
+```bash
+python samples/snippets/zonal_buckets/storage_open_object_multiple_ranged_read.py --bucket_name --object_name
+```
+
+### Download the entire content of an object
+
+This snippet downloads the entire content of an object using a multi-range downloader.
+
+```bash
+python samples/snippets/zonal_buckets/storage_open_object_read_full_object.py --bucket_name --object_name
+```
+
+
+
+### Download a range of bytes from multiple objects concurrently
+
+This snippet downloads a range of bytes from multiple objects concurrently.
+
+```bash
+python samples/snippets/zonal_buckets/storage_open_multiple_objects_ranged_read.py --bucket_name --object_names
+```
\ No newline at end of file
diff --git a/storage/samples/snippets/zonal_buckets/storage_create_and_write_appendable_object.py b/storage/samples/snippets/zonal_buckets/storage_create_and_write_appendable_object.py
new file mode 100644
index 00000000000..725eeb2bd98
--- /dev/null
+++ b/storage/samples/snippets/zonal_buckets/storage_create_and_write_appendable_object.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+# Copyright 2026 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import asyncio
+
+from google.cloud.storage.asyncio.async_appendable_object_writer import (
+ AsyncAppendableObjectWriter,
+)
+from google.cloud.storage.asyncio.async_grpc_client import AsyncGrpcClient
+
+
+# [START storage_create_and_write_appendable_object]
+
+
async def storage_create_and_write_appendable_object(
    bucket_name, object_name, grpc_client=None
):
    """Create a new appendable object in a zonal bucket and append data.

    Args:
        bucket_name: Name of the zonal bucket.
        object_name: Name of the appendable object to create.
        grpc_client: An existing grpc_client to use; this is only for
            testing.
    """
    client = grpc_client if grpc_client is not None else AsyncGrpcClient()

    # generation=0 requests creation of a brand-new object; the service
    # throws `FailedPrecondition` if the object already exists.
    writer = AsyncAppendableObjectWriter(
        client=client,
        bucket_name=bucket_name,
        object_name=object_name,
        generation=0,
    )

    # Creates a new appendable object of size 0 and opens it for appending.
    await writer.open()

    # Append data. `.append` may be called as many times as needed; each
    # call extends the end of the object.
    await writer.append(b"Some data")

    # Once all appends are done, close the gRPC bidirectional stream.
    await writer.close()

    print(
        f"Appended object {object_name} created of size {writer.persisted_size} bytes."
    )


# [END storage_create_and_write_appendable_object]

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
    parser.add_argument("--object_name", help="Your Cloud Storage object name.")

    args = parser.parse_args()

    asyncio.run(
        storage_create_and_write_appendable_object(
            bucket_name=args.bucket_name,
            object_name=args.object_name,
        )
    )
diff --git a/storage/samples/snippets/zonal_buckets/storage_finalize_appendable_object_upload.py b/storage/samples/snippets/zonal_buckets/storage_finalize_appendable_object_upload.py
new file mode 100644
index 00000000000..807fe40a58d
--- /dev/null
+++ b/storage/samples/snippets/zonal_buckets/storage_finalize_appendable_object_upload.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+
+# Copyright 2026 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import asyncio
+
+from google.cloud.storage.asyncio.async_appendable_object_writer import (
+ AsyncAppendableObjectWriter,
+)
+from google.cloud.storage.asyncio.async_grpc_client import AsyncGrpcClient
+
+
+# [START storage_finalize_appendable_object_upload]
async def storage_finalize_appendable_object_upload(
    bucket_name, object_name, grpc_client=None
):
    """Creates, writes to, and finalizes an appendable object.

    Args:
        bucket_name: Name of the zonal bucket.
        object_name: Name of the appendable object to create.
        grpc_client: an existing grpc_client to use, this is only for testing.
    """

    if grpc_client is None:
        grpc_client = AsyncGrpcClient()
    writer = AsyncAppendableObjectWriter(
        client=grpc_client,
        bucket_name=bucket_name,
        object_name=object_name,
        generation=0,  # throws `FailedPrecondition` if object already exists.
    )
    # This creates a new appendable object of size 0 and opens it for appending.
    await writer.open()

    # Appends data to the object.
    await writer.append(b"Some data")

    # Finalize the appendable object.
    # NOTE:
    # 1. once finalized no more appends can be done to the object.
    # 2. If you don't want to finalize, you can simply call `writer.close`
    # 3. calling `.finalize()` also closes the grpc-bidi stream, calling
    #    `.close` after `.finalize` may lead to undefined behavior.
    object_resource = await writer.finalize()

    print(f"Appendable object {object_name} created and finalized.")
    print("Object Metadata:")
    print(object_resource)


# [END storage_finalize_appendable_object_upload]

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
    parser.add_argument("--object_name", help="Your Cloud Storage object name.")

    args = parser.parse_args()

    asyncio.run(
        storage_finalize_appendable_object_upload(
            bucket_name=args.bucket_name,
            object_name=args.object_name,
        )
    )
diff --git a/storage/samples/snippets/zonal_buckets/storage_open_multiple_objects_ranged_read.py b/storage/samples/snippets/zonal_buckets/storage_open_multiple_objects_ranged_read.py
new file mode 100644
index 00000000000..bed580d3662
--- /dev/null
+++ b/storage/samples/snippets/zonal_buckets/storage_open_multiple_objects_ranged_read.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+
+# Copyright 2026 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Downloads a range of bytes from multiple objects concurrently.
+Example usage:
+ ```python samples/snippets/zonal_buckets/storage_open_multiple_objects_ranged_read.py \
+ --bucket_name \
+ --object_names ```
+"""
+import argparse
+import asyncio
+from io import BytesIO
+
+from google.cloud.storage.asyncio.async_grpc_client import (
+ AsyncGrpcClient,
+)
+from google.cloud.storage.asyncio.async_multi_range_downloader import (
+ AsyncMultiRangeDownloader,
+)
+
+
+# [START storage_open_multiple_objects_ranged_read]
async def storage_open_multiple_objects_ranged_read(
    bucket_name, object_names, grpc_client=None
):
    """Downloads a range of bytes from multiple objects concurrently.

    Args:
        bucket_name: Name of the bucket containing the objects.
        object_names: Iterable of object names to read from.
        grpc_client: an existing grpc_client to use, this is only for testing.
    """
    if grpc_client is None:
        grpc_client = AsyncGrpcClient()

    async def _download_range(object_name):
        """Helper coroutine to download a range from a single object."""
        mrd = AsyncMultiRangeDownloader(grpc_client, bucket_name, object_name)
        try:
            # Open the object, mrd always opens in read mode.
            await mrd.open()

            # Each object downloads the first 100 bytes.
            start_byte = 0
            size = 100

            # requested range will be downloaded into this buffer, user may provide
            # their own buffer or file-like object.
            output_buffer = BytesIO()
            await mrd.download_ranges([(start_byte, size, output_buffer)])
        finally:
            # Only close if the stream actually opened (open() may have raised).
            if mrd.is_stream_open:
                await mrd.close()

        # Downloaded size can differ from requested size if object is smaller.
        # mrd will download at most up to the end of the object.
        downloaded_size = output_buffer.getbuffer().nbytes
        print(f"Downloaded {downloaded_size} bytes from {object_name}")

    # Fan out one download coroutine per object and run them concurrently.
    download_tasks = [_download_range(name) for name in object_names]
    await asyncio.gather(*download_tasks)


# [END storage_open_multiple_objects_ranged_read]


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
    parser.add_argument(
        "--object_names", nargs="+", help="Your Cloud Storage object name(s)."
    )

    args = parser.parse_args()

    asyncio.run(
        storage_open_multiple_objects_ranged_read(args.bucket_name, args.object_names)
    )
diff --git a/storage/samples/snippets/zonal_buckets/storage_open_object_multiple_ranged_read.py b/storage/samples/snippets/zonal_buckets/storage_open_object_multiple_ranged_read.py
new file mode 100644
index 00000000000..b0f64c48690
--- /dev/null
+++ b/storage/samples/snippets/zonal_buckets/storage_open_object_multiple_ranged_read.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+# Copyright 2026 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import asyncio
+from io import BytesIO
+
+from google.cloud.storage.asyncio.async_grpc_client import AsyncGrpcClient
+from google.cloud.storage.asyncio.async_multi_range_downloader import (
+ AsyncMultiRangeDownloader,
+)
+
+
+# [START storage_open_object_multiple_ranged_read]
async def storage_open_object_multiple_ranged_read(
    bucket_name, object_name, grpc_client=None
):
    """Downloads multiple ranges of bytes from a single object into different buffers.

    Args:
        bucket_name: Name of the bucket containing the object.
        object_name: Name of the object to read from.
        grpc_client: an existing grpc_client to use, this is only for testing.
    """
    if grpc_client is None:
        grpc_client = AsyncGrpcClient()

    mrd = AsyncMultiRangeDownloader(grpc_client, bucket_name, object_name)

    try:
        # Open the object, mrd always opens in read mode.
        await mrd.open()

        # Specify four different buffers to download ranges into.
        buffers = [BytesIO(), BytesIO(), BytesIO(), BytesIO()]

        # Define the ranges to download. Each range is a tuple of (start_byte, size, buffer).
        # All ranges will download 10 bytes from different starting positions.
        # We choose arbitrary start bytes for this example. An object should be large enough.
        # A user can choose any start byte between 0 and `object_size`.
        # If `start_bytes` is greater than `object_size`, mrd will throw an error.
        ranges = [
            (0, 10, buffers[0]),
            (20, 10, buffers[1]),
            (40, 10, buffers[2]),
            (60, 10, buffers[3]),
        ]

        await mrd.download_ranges(ranges)

    finally:
        # Guard the close with is_stream_open, matching the sibling
        # ranged-read samples: if open() raised, the stream was never
        # opened and must not be closed.
        if mrd.is_stream_open:
            await mrd.close()

    # Print the downloaded content from each buffer.
    for i, output_buffer in enumerate(buffers):
        downloaded_size = output_buffer.getbuffer().nbytes
        print(
            f"Downloaded {downloaded_size} bytes into buffer {i + 1} from start byte {ranges[i][0]}: {output_buffer.getvalue()}"
        )


# [END storage_open_object_multiple_ranged_read]

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
    parser.add_argument("--object_name", help="Your Cloud Storage object name.")

    args = parser.parse_args()

    asyncio.run(
        storage_open_object_multiple_ranged_read(args.bucket_name, args.object_name)
    )
diff --git a/storage/samples/snippets/zonal_buckets/storage_open_object_read_full_object.py b/storage/samples/snippets/zonal_buckets/storage_open_object_read_full_object.py
new file mode 100644
index 00000000000..2e18caabe23
--- /dev/null
+++ b/storage/samples/snippets/zonal_buckets/storage_open_object_read_full_object.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+# Copyright 2026 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import asyncio
+from io import BytesIO
+
+from google.cloud.storage.asyncio.async_grpc_client import AsyncGrpcClient
+from google.cloud.storage.asyncio.async_multi_range_downloader import (
+ AsyncMultiRangeDownloader,
+)
+
+
+# [START storage_open_object_read_full_object]
async def storage_open_object_read_full_object(
    bucket_name, object_name, grpc_client=None
):
    """Downloads the entire content of an object using a multi-range downloader.

    Args:
        bucket_name: Name of the bucket containing the object.
        object_name: Name of the object to download.
        grpc_client: an existing grpc_client to use, this is only for testing.
    """
    if grpc_client is None:
        grpc_client = AsyncGrpcClient()

    # mrd = Multi-Range-Downloader
    mrd = AsyncMultiRangeDownloader(grpc_client, bucket_name, object_name)

    try:
        # Open the object, mrd always opens in read mode.
        await mrd.open()

        # This could be any buffer or file-like object.
        output_buffer = BytesIO()
        # A download range of (0, 0) means to read from the beginning to the end.
        await mrd.download_ranges([(0, 0, output_buffer)])
    finally:
        # Only close if the stream actually opened (open() may have raised).
        if mrd.is_stream_open:
            await mrd.close()

    downloaded_bytes = output_buffer.getvalue()
    print(
        f"Downloaded all {len(downloaded_bytes)} bytes from object {object_name} in bucket {bucket_name}."
    )


# [END storage_open_object_read_full_object]

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
    parser.add_argument("--object_name", help="Your Cloud Storage object name.")

    args = parser.parse_args()

    asyncio.run(
        storage_open_object_read_full_object(args.bucket_name, args.object_name)
    )
diff --git a/storage/samples/snippets/zonal_buckets/storage_open_object_single_ranged_read.py b/storage/samples/snippets/zonal_buckets/storage_open_object_single_ranged_read.py
new file mode 100644
index 00000000000..74bec43f68e
--- /dev/null
+++ b/storage/samples/snippets/zonal_buckets/storage_open_object_single_ranged_read.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+# Copyright 2026 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import asyncio
+from io import BytesIO
+
+from google.cloud.storage.asyncio.async_grpc_client import AsyncGrpcClient
+from google.cloud.storage.asyncio.async_multi_range_downloader import (
+ AsyncMultiRangeDownloader,
+)
+
+
+# [START storage_open_object_single_ranged_read]
async def storage_open_object_single_ranged_read(
    bucket_name, object_name, start_byte, size, grpc_client=None
):
    """Downloads a range of bytes from an object.

    Args:
        bucket_name: Name of the bucket containing the object.
        object_name: Name of the object to read from.
        start_byte: Offset of the first byte to download.
        size: Number of bytes to download starting at start_byte.
        grpc_client: An existing grpc_client to use; this is only for testing.
            When None, a new AsyncGrpcClient is created.
    """
    if grpc_client is None:
        grpc_client = AsyncGrpcClient()

    # mrd = Multi-Range-Downloader
    mrd = AsyncMultiRangeDownloader(grpc_client, bucket_name, object_name)

    try:
        # Open the object, mrd always opens in read mode.
        await mrd.open()

        # requested range will be downloaded into this buffer, user may provide
        # their own buffer or file-like object.
        output_buffer = BytesIO()
        await mrd.download_ranges([(start_byte, size, output_buffer)])
    finally:
        # Always release the underlying stream, even if the download failed.
        if mrd.is_stream_open:
            await mrd.close()

    # Downloaded size can differ from requested size if object is smaller.
    # mrd will download at most up to the end of the object.
    downloaded_size = output_buffer.getbuffer().nbytes
    print(f"Downloaded {downloaded_size} bytes from {object_name}")
+
+
+# [END storage_open_object_single_ranged_read]
+
if __name__ == "__main__":
    # Command-line entry point: download a single byte range from an object.
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    arg_parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
    arg_parser.add_argument("--object_name", help="Your Cloud Storage object name.")
    arg_parser.add_argument(
        "--start_byte", type=int, help="The starting byte of the range."
    )
    arg_parser.add_argument("--size", type=int, help="The number of bytes to download.")
    cli_args = arg_parser.parse_args()

    asyncio.run(
        storage_open_object_single_ranged_read(
            cli_args.bucket_name,
            cli_args.object_name,
            cli_args.start_byte,
            cli_args.size,
        )
    )
diff --git a/storage/samples/snippets/zonal_buckets/storage_pause_and_resume_appendable_upload.py b/storage/samples/snippets/zonal_buckets/storage_pause_and_resume_appendable_upload.py
new file mode 100644
index 00000000000..c758dc6419d
--- /dev/null
+++ b/storage/samples/snippets/zonal_buckets/storage_pause_and_resume_appendable_upload.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+
+# Copyright 2026 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import asyncio
+
+from google.cloud.storage.asyncio.async_appendable_object_writer import (
+ AsyncAppendableObjectWriter,
+)
+from google.cloud.storage.asyncio.async_grpc_client import AsyncGrpcClient
+
+
+# [START storage_pause_and_resume_appendable_upload]
async def storage_pause_and_resume_appendable_upload(
    bucket_name, object_name, grpc_client=None
):
    """Demonstrates pausing and resuming an appendable object upload.

    Args:
        bucket_name: Name of the bucket to write to.
        object_name: Name of the appendable object to create.
        grpc_client: An existing grpc_client to use; this is only for testing.
            When None, a new AsyncGrpcClient is created.
    """
    if grpc_client is None:
        grpc_client = AsyncGrpcClient()

    # 1. Create a writer and append the first chunk of data.
    writer1 = AsyncAppendableObjectWriter(
        client=grpc_client,
        bucket_name=bucket_name,
        object_name=object_name,
    )
    await writer1.open()
    await writer1.append(b"First part of the data. ")
    print(f"Appended {writer1.persisted_size} bytes with the first writer.")

    # 2. After appending some data, close the writer to "pause" the upload.
    # NOTE: you can pause indefinitely and still read the content uploaded so far using MRD.
    await writer1.close()

    print("First writer closed. Upload is 'paused'.")

    # 3. Create a new writer, passing the generation number from the previous
    # writer. This is a precondition to ensure that the object hasn't been
    # modified since we last accessed it.
    generation_to_resume = writer1.generation
    print(f"Generation to resume from is: {generation_to_resume}")

    writer2 = AsyncAppendableObjectWriter(
        client=grpc_client,
        bucket_name=bucket_name,
        object_name=object_name,
        generation=generation_to_resume,
    )
    # 4. Open the new writer.
    try:
        await writer2.open()

        # 5. Append some more data using the new writer.
        await writer2.append(b"Second part of the data.")
        print(f"Appended more data. Total size is now {writer2.persisted_size} bytes.")
    finally:
        # 6. Finally, close the new writer.
        # NOTE(review): `_is_stream_open` is a private attribute of the writer —
        # confirm whether a public equivalent exists.
        if writer2._is_stream_open:
            await writer2.close()
            print("Second writer closed. Full object uploaded.")
+
+
+# [END storage_pause_and_resume_appendable_upload]
+
if __name__ == "__main__":
    # Command-line entry point: pause and resume an appendable upload.
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    arg_parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
    arg_parser.add_argument("--object_name", help="Your Cloud Storage object name.")
    cli_args = arg_parser.parse_args()

    asyncio.run(
        storage_pause_and_resume_appendable_upload(
            bucket_name=cli_args.bucket_name,
            object_name=cli_args.object_name,
        )
    )
diff --git a/storage/samples/snippets/zonal_buckets/storage_read_appendable_object_tail.py b/storage/samples/snippets/zonal_buckets/storage_read_appendable_object_tail.py
new file mode 100644
index 00000000000..6248980669c
--- /dev/null
+++ b/storage/samples/snippets/zonal_buckets/storage_read_appendable_object_tail.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python
+
+# Copyright 2026 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import asyncio
+import time
+from datetime import datetime
+from io import BytesIO
+
+from google.cloud.storage.asyncio.async_appendable_object_writer import (
+ AsyncAppendableObjectWriter,
+)
+from google.cloud.storage.asyncio.async_grpc_client import AsyncGrpcClient
+from google.cloud.storage.asyncio.async_multi_range_downloader import (
+ AsyncMultiRangeDownloader,
+)
+
# Chunk of data appended on each iteration of the appender loop. Kept small
# (1,000 bytes) so the demo matches the appender's description and the system
# test (duration=3, one append per 0.1 s) does not hold gigabyte-sized buffers
# in memory or upload gigabytes of data.
BYTES_TO_APPEND = b"fav_bytes." * 100
NUM_BYTES_TO_APPEND_EVERY_SECOND = len(BYTES_TO_APPEND)
+
+
+# [START storage_read_appendable_object_tail]
async def appender(writer: AsyncAppendableObjectWriter, duration: int):
    """Repeatedly appends BYTES_TO_APPEND to the object for *duration* seconds.

    A chunk is appended roughly every 0.1 seconds and progress is printed after
    each append.
    """
    print("Appender started.")
    bytes_appended = 0
    start_time = time.monotonic()
    # Run the appender for the specified duration.
    while time.monotonic() - start_time < duration:
        await writer.append(BYTES_TO_APPEND)
        now = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
        bytes_appended += NUM_BYTES_TO_APPEND_EVERY_SECOND
        print(
            f"[{now}] Appended {NUM_BYTES_TO_APPEND_EVERY_SECOND} new bytes. Total appended: {bytes_appended} bytes."
        )
        await asyncio.sleep(0.1)
    print("Appender finished.")
+
+
async def tailer(
    bucket_name: str, object_name: str, duration: int, client: AsyncGrpcClient
):
    """Tails the object by reading new data as it is appended.

    Polls every 0.1 seconds, downloading from start_byte to the current end of
    the object and advancing start_byte past whatever was read.

    Args:
        bucket_name: Name of the bucket containing the object.
        object_name: Name of the object to tail.
        duration: How long, in seconds, to keep polling.
        client: The AsyncGrpcClient used by the multi-range downloader.
    """
    print("Tailer started.")
    start_byte = 0
    start_time = time.monotonic()
    mrd = AsyncMultiRangeDownloader(client, bucket_name, object_name)
    try:
        await mrd.open()
        # Run the tailer for the specified duration.
        while time.monotonic() - start_time < duration:
            output_buffer = BytesIO()
            # A download range of (start, 0) means to read from 'start' to the end.
            await mrd.download_ranges([(start_byte, 0, output_buffer)])

            bytes_downloaded = output_buffer.getbuffer().nbytes
            if bytes_downloaded > 0:
                now = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
                print(f"[{now}] Tailer read {bytes_downloaded} new bytes: ")
                start_byte += bytes_downloaded

            await asyncio.sleep(0.1)  # Poll for new data every 0.1 seconds.
    finally:
        # Always release the underlying stream, even if a download failed.
        if mrd.is_stream_open:
            await mrd.close()
    print("Tailer finished.")
+
+
# read_appendable_object_tail simulates a "tail -f" command on a GCS object. It
# repeatedly polls an appendable object for new content. In a real
# application, the object would be written to by a separate process.
async def read_appendable_object_tail(
    bucket_name: str, object_name: str, duration: int, grpc_client=None
):
    """Creates an empty appendable object and runs appender/tailer concurrently.

    Args:
        bucket_name: Name of the bucket to create the object in.
        object_name: Name of the appendable object to create.
        duration: How long, in seconds, to run the appender and tailer tasks.
        grpc_client: an existing grpc_client to use, this is only for testing.
    """
    if grpc_client is None:
        grpc_client = AsyncGrpcClient()
    writer = AsyncAppendableObjectWriter(
        client=grpc_client,
        bucket_name=bucket_name,
        object_name=object_name,
    )
    try:
        # 1. Create an empty appendable object.
        await writer.open()
        print(f"Created empty appendable object: {object_name}")

        # 2. Create the appender and tailer coroutines.
        appender_task = asyncio.create_task(appender(writer, duration))
        tailer_task = asyncio.create_task(
            tailer(bucket_name, object_name, duration, grpc_client)
        )

        # 3. Execute the coroutines concurrently.
        await asyncio.gather(appender_task, tailer_task)
    finally:
        # NOTE(review): `_is_stream_open` is a private attribute of the writer —
        # confirm whether a public equivalent exists.
        if writer._is_stream_open:
            await writer.close()
            print("Writer closed.")
+
+
+# [END storage_read_appendable_object_tail]
+
if __name__ == "__main__":
    # Command-line entry point: run the tail -f demo against a real bucket.
    arg_parser = argparse.ArgumentParser(
        description="Demonstrates tailing an appendable GCS object.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    arg_parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
    arg_parser.add_argument(
        "--object_name", help="Your Cloud Storage object name to be created."
    )
    arg_parser.add_argument(
        "--duration",
        type=int,
        default=60,
        help="Duration in seconds to run the demo.",
    )
    cli_args = arg_parser.parse_args()

    asyncio.run(
        read_appendable_object_tail(
            cli_args.bucket_name, cli_args.object_name, cli_args.duration
        )
    )
diff --git a/storage/samples/snippets/zonal_buckets/zonal_snippets_test.py b/storage/samples/snippets/zonal_buckets/zonal_snippets_test.py
new file mode 100644
index 00000000000..6852efe2286
--- /dev/null
+++ b/storage/samples/snippets/zonal_buckets/zonal_snippets_test.py
@@ -0,0 +1,260 @@
+# Copyright 2025 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import asyncio
+import uuid
+import os
+
+import pytest
+from google.cloud.storage import Client
+import contextlib
+
+from google.cloud.storage.asyncio.async_grpc_client import AsyncGrpcClient
+from google.cloud.storage.asyncio.async_appendable_object_writer import (
+ AsyncAppendableObjectWriter,
+)
+
+# Import all the snippets
+import storage_create_and_write_appendable_object
+import storage_finalize_appendable_object_upload
+import storage_open_multiple_objects_ranged_read
+import storage_open_object_multiple_ranged_read
+import storage_open_object_read_full_object
+import storage_open_object_single_ranged_read
+import storage_pause_and_resume_appendable_upload
+import storage_read_appendable_object_tail
+
# Zonal system tests hit real zonal-bucket resources, so they are opt-in.
pytestmark = pytest.mark.skipif(
    os.getenv("RUN_ZONAL_SYSTEM_TESTS") != "True",
    reason="Zonal system tests need to be explicitly enabled. This helps scheduling tests in Kokoro and Cloud Build.",
)


# TODO: replace this with a fixture once zonal bucket creation / deletion
# is supported in grpc client or json client.
_ZONAL_BUCKET = os.getenv("ZONAL_BUCKET")
+
+
async def create_async_grpc_client():
    """Creates and returns a new AsyncGrpcClient."""
    return AsyncGrpcClient()
+
+
+# Forcing a single event loop for the whole test session
+@pytest.fixture(scope="session")
+def event_loop():
+ """Redefine pytest-asyncio's event_loop fixture to be session-scoped."""
+ loop = asyncio.get_event_loop_policy().new_event_loop()
+ yield loop
+ loop.close()
+
+
@pytest.fixture(scope="session")
def async_grpc_client(event_loop):
    """Yields an AsyncGrpcClient created on the session event loop.

    NOTE(review): nothing closes this client at session end — confirm whether
    AsyncGrpcClient needs explicit teardown.
    """
    grpc_client = event_loop.run_until_complete(create_async_grpc_client())
    yield grpc_client
+
+
@pytest.fixture(scope="session")
def json_client():
    """Yields a JSON API storage client, closed when the session ends."""
    with contextlib.closing(Client()) as client:
        yield client
+
+
async def create_appendable_object(grpc_client, object_name, data):
    """Creates an appendable object in _ZONAL_BUCKET containing *data*.

    Args:
        grpc_client: The AsyncGrpcClient to write with.
        object_name: Name of the object to create.
        data: Bytes to append to the newly created object.

    Returns:
        The generation number of the created object.
    """
    writer = AsyncAppendableObjectWriter(
        client=grpc_client,
        bucket_name=_ZONAL_BUCKET,
        object_name=object_name,
        generation=0,  # throws `FailedPrecondition` if object already exists.
    )
    await writer.open()
    await writer.append(data)
    await writer.close()
    return writer.generation


# NOTE: a duplicated `_ZONAL_BUCKET = os.getenv("ZONAL_BUCKET")` definition
# (identical to the one near the top of this module) was removed here.
+
+
def test_storage_create_and_write_appendable_object(
    async_grpc_client, json_client, event_loop, capsys
):
    """Runs the create-and-write snippet and verifies its console output."""
    target_object = f"zonal-snippets-test-{uuid.uuid4()}"

    coro = storage_create_and_write_appendable_object.storage_create_and_write_appendable_object(
        _ZONAL_BUCKET, target_object, grpc_client=async_grpc_client
    )
    event_loop.run_until_complete(coro)

    captured, _ = capsys.readouterr()
    assert f"Appended object {target_object} created of size" in captured

    # Clean up the object created by the snippet.
    json_client.bucket(_ZONAL_BUCKET).blob(target_object).delete()
+
+
def test_storage_finalize_appendable_object_upload(
    async_grpc_client, json_client, event_loop, capsys
):
    """Runs the finalize-upload snippet and verifies its console output."""
    target_object = f"test-finalize-appendable-{uuid.uuid4()}"

    coro = storage_finalize_appendable_object_upload.storage_finalize_appendable_object_upload(
        _ZONAL_BUCKET, target_object, grpc_client=async_grpc_client
    )
    event_loop.run_until_complete(coro)

    captured, _ = capsys.readouterr()
    assert f"Appendable object {target_object} created and finalized." in captured

    # Clean up the object created by the snippet.
    json_client.bucket(_ZONAL_BUCKET).get_blob(target_object).delete()
+
+
def test_storage_pause_and_resume_appendable_upload(
    async_grpc_client, json_client, event_loop, capsys
):
    """Runs the pause/resume snippet and checks both writers reported closing."""
    target_object = f"test-pause-resume-{uuid.uuid4()}"

    coro = storage_pause_and_resume_appendable_upload.storage_pause_and_resume_appendable_upload(
        _ZONAL_BUCKET, target_object, grpc_client=async_grpc_client
    )
    event_loop.run_until_complete(coro)

    captured, _ = capsys.readouterr()
    assert "First writer closed. Upload is 'paused'." in captured
    assert "Second writer closed. Full object uploaded." in captured

    # Clean up the object created by the snippet.
    json_client.bucket(_ZONAL_BUCKET).get_blob(target_object).delete()
+
+
def test_storage_read_appendable_object_tail(
    async_grpc_client, json_client, event_loop, capsys
):
    """Runs the tail demo briefly and checks every stage reported progress."""
    target_object = f"test-read-tail-{uuid.uuid4()}"
    event_loop.run_until_complete(
        storage_read_appendable_object_tail.read_appendable_object_tail(
            _ZONAL_BUCKET, target_object, duration=3, grpc_client=async_grpc_client
        )
    )

    captured, _ = capsys.readouterr()
    expected_messages = [
        f"Created empty appendable object: {target_object}",
        "Appender started.",
        "Tailer started.",
        "Tailer read",
        "Tailer finished.",
        "Writer closed.",
    ]
    for message in expected_messages:
        assert message in captured

    # Clean up the object created by the demo.
    json_client.bucket(_ZONAL_BUCKET).blob(target_object).delete()
+
+
def test_storage_open_object_read_full_object(
    async_grpc_client, json_client, event_loop, capsys
):
    """Creates an object, runs the full-read snippet, and checks its output."""
    target_object = f"test-open-read-full-{uuid.uuid4()}"
    payload = b"Hello, is it me you're looking for?"
    event_loop.run_until_complete(
        create_appendable_object(async_grpc_client, target_object, payload)
    )

    event_loop.run_until_complete(
        storage_open_object_read_full_object.storage_open_object_read_full_object(
            _ZONAL_BUCKET, target_object, grpc_client=async_grpc_client
        )
    )

    captured, _ = capsys.readouterr()
    expected = (
        f"Downloaded all {len(payload)} bytes from object {target_object}"
        f" in bucket {_ZONAL_BUCKET}."
    )
    assert expected in captured

    # Clean up the fixture object.
    json_client.bucket(_ZONAL_BUCKET).blob(target_object).delete()
+
+
def test_storage_open_object_single_ranged_read(
    async_grpc_client, json_client, event_loop, capsys
):
    """Creates an object, reads a 5-byte range, and checks the reported size."""
    target_object = f"test-open-single-range-{uuid.uuid4()}"
    event_loop.run_until_complete(
        create_appendable_object(
            async_grpc_client, target_object, b"Hello, is it me you're looking for?"
        )
    )

    range_size = 5
    event_loop.run_until_complete(
        storage_open_object_single_ranged_read.storage_open_object_single_ranged_read(
            _ZONAL_BUCKET,
            target_object,
            start_byte=0,
            size=range_size,
            grpc_client=async_grpc_client,
        )
    )

    captured, _ = capsys.readouterr()
    assert f"Downloaded {range_size} bytes from {target_object}" in captured

    # Clean up the fixture object.
    json_client.bucket(_ZONAL_BUCKET).blob(target_object).delete()
+
+
def test_storage_open_object_multiple_ranged_read(
    async_grpc_client, json_client, event_loop, capsys
):
    """Creates a 100-byte object and verifies all four ranges were read."""
    target_object = f"test-open-multi-range-{uuid.uuid4()}"
    event_loop.run_until_complete(
        create_appendable_object(async_grpc_client, target_object, b"a" * 100)
    )

    event_loop.run_until_complete(
        storage_open_object_multiple_ranged_read.storage_open_object_multiple_ranged_read(
            _ZONAL_BUCKET, target_object, grpc_client=async_grpc_client
        )
    )

    captured, _ = capsys.readouterr()
    # The snippet reads 10 bytes from each of four start offsets.
    for buffer_index, start in enumerate([0, 20, 40, 60], start=1):
        assert (
            f"Downloaded 10 bytes into buffer {buffer_index} from start byte {start}: b'aaaaaaaaaa'"
            in captured
        )

    # Clean up the fixture object.
    json_client.bucket(_ZONAL_BUCKET).blob(target_object).delete()
+
+
def test_storage_open_multiple_objects_ranged_read(
    async_grpc_client, json_client, event_loop, capsys
):
    """Creates two objects, runs the multi-object snippet, and cleans up."""
    # Insertion order matters: the snippet is handed the names in this order.
    payloads = {
        f"multi-obj-1-{uuid.uuid4()}": b"Content of object 1",
        f"multi-obj-2-{uuid.uuid4()}": b"Content of object 2",
    }
    for object_name, payload in payloads.items():
        event_loop.run_until_complete(
            create_appendable_object(async_grpc_client, object_name, payload)
        )

    event_loop.run_until_complete(
        storage_open_multiple_objects_ranged_read.storage_open_multiple_objects_ranged_read(
            _ZONAL_BUCKET, list(payloads), grpc_client=async_grpc_client
        )
    )

    captured, _ = capsys.readouterr()
    bucket = json_client.bucket(_ZONAL_BUCKET)
    for object_name, payload in payloads.items():
        assert f"Downloaded {len(payload)} bytes from {object_name}" in captured
        # Clean up the fixture object.
        bucket.blob(object_name).delete()