# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Create / interact with Google Cloud Storage buckets."""

import base64
import copy
import datetime
import json
from urllib.parse import urlsplit
import warnings

from google.api_core import datetime_helpers
from google.cloud._helpers import _datetime_to_rfc3339
from google.cloud._helpers import _rfc3339_nanos_to_datetime
from google.cloud.exceptions import NotFound
from google.api_core.iam import Policy
from google.cloud.storage import _signing
from google.cloud.storage._helpers import _add_etag_match_headers
from google.cloud.storage._helpers import _add_generation_match_parameters
from google.cloud.storage._helpers import _NOW
from google.cloud.storage._helpers import _PropertyMixin
from google.cloud.storage._helpers import _UTC
from google.cloud.storage._helpers import _scalar_property
from google.cloud.storage._helpers import _validate_name
from google.cloud.storage._signing import generate_signed_url_v2
from google.cloud.storage._signing import generate_signed_url_v4
from google.cloud.storage._helpers import _bucket_bound_hostname_url
from google.cloud.storage._helpers import _virtual_hosted_style_base_url
from google.cloud.storage._opentelemetry_tracing import create_trace_span
from google.cloud.storage.acl import BucketACL
from google.cloud.storage.acl import DefaultObjectACL
from google.cloud.storage.blob import Blob
from google.cloud.storage.constants import _DEFAULT_TIMEOUT
from google.cloud.storage.constants import ARCHIVE_STORAGE_CLASS
from google.cloud.storage.constants import COLDLINE_STORAGE_CLASS
from google.cloud.storage.constants import DUAL_REGION_LOCATION_TYPE
from google.cloud.storage.constants import (
    DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS,
)
from google.cloud.storage.constants import MULTI_REGIONAL_LEGACY_STORAGE_CLASS
from google.cloud.storage.constants import MULTI_REGION_LOCATION_TYPE
from google.cloud.storage.constants import NEARLINE_STORAGE_CLASS
from google.cloud.storage.constants import PUBLIC_ACCESS_PREVENTION_INHERITED
from google.cloud.storage.constants import REGIONAL_LEGACY_STORAGE_CLASS
from google.cloud.storage.constants import REGION_LOCATION_TYPE
from google.cloud.storage.constants import STANDARD_STORAGE_CLASS
from google.cloud.storage.notification import BucketNotification
from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT
from google.cloud.storage.retry import DEFAULT_RETRY
from google.cloud.storage.retry import DEFAULT_RETRY_IF_GENERATION_SPECIFIED
from google.cloud.storage.retry import DEFAULT_RETRY_IF_ETAG_IN_JSON
from google.cloud.storage.retry import DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED


_UBLA_BPO_ENABLED_MESSAGE = (
    "Pass only one of 'uniform_bucket_level_access_enabled' / "
    "'bucket_policy_only_enabled' to 'IAMConfiguration'."
)
_BPO_ENABLED_MESSAGE = (
    "'IAMConfiguration.bucket_policy_only_enabled' is deprecated. "
    "Instead, use 'IAMConfiguration.uniform_bucket_level_access_enabled'."
)
_UBLA_BPO_LOCK_TIME_MESSAGE = (
    "Pass only one of 'uniform_bucket_level_access_lock_time' / "
    "'bucket_policy_only_lock_time' to 'IAMConfiguration'."
)
_BPO_LOCK_TIME_MESSAGE = (
    "'IAMConfiguration.bucket_policy_only_lock_time' is deprecated. "
    "Instead, use 'IAMConfiguration.uniform_bucket_level_access_lock_time'."
)
_LOCATION_SETTER_MESSAGE = (
    "Assignment to 'Bucket.location' is deprecated, as it is only "
    "valid before the bucket is created. Instead, pass the location "
    "to `Bucket.create`."
)
_FROM_STRING_MESSAGE = (
    "Bucket.from_string() is deprecated. Use Bucket.from_uri() instead."
)


def _blobs_page_start(iterator, page, response):
    """Grab prefixes after a :class:`~google.cloud.iterator.Page` started.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type page: :class:`~google.api_core.page_iterator.Page`
    :param page: The page that was just created.

    :type response: dict
    :param response: The JSON API response for a page of blobs.
    """
    page.prefixes = tuple(response.get("prefixes", ()))
    iterator.prefixes.update(page.prefixes)

def _item_to_blob(iterator, item):
    """Convert a JSON blob to the native object.

    .. note::

        This assumes that the ``bucket`` attribute has been
        added to the iterator after being created.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that has retrieved the item.

    :type item: dict
    :param item: An item to be converted to a blob.

    :rtype: :class:`.Blob`
    :returns: The next blob in the page.
    """
    name = item.get("name")
    blob = Blob(name, bucket=iterator.bucket)
    blob._set_properties(item)
    return blob

def _item_to_notification(iterator, item):
    """Convert a JSON notification resource to the native object.

    .. note::

        This assumes that the ``bucket`` attribute has been
        added to the iterator after being created.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that has retrieved the item.

    :type item: dict
    :param item: An item to be converted to a notification.

    :rtype: :class:`.BucketNotification`
    :returns: The next notification being iterated.
    """
    return BucketNotification.from_api_repr(item, bucket=iterator.bucket)


class LifecycleRuleConditions(dict):
    """Map a single lifecycle rule for a bucket.

    See: https://cloud.google.com/storage/docs/lifecycle

    :type age: int
    :param age: (Optional) Apply rule action to items whose age, in days,
                exceeds this value.

    :type created_before: datetime.date
    :param created_before: (Optional) Apply rule action to items created
                           before this date.

    :type is_live: bool
    :param is_live: (Optional) If true, apply rule action to non-versioned
                    items, or to items with no newer versions. If false, apply
                    rule action to versioned items with at least one newer
                    version.

    :type matches_prefix: list(str)
    :param matches_prefix: (Optional) Apply rule action to items whose
                           names begin with any of the given prefixes.

    :type matches_storage_class: list(str), one or more of
                                 :attr:`Bucket.STORAGE_CLASSES`.
    :param matches_storage_class: (Optional) Apply rule action to items
                                  whose storage class matches this value.

    :type matches_suffix: list(str)
    :param matches_suffix: (Optional) Apply rule action to items whose
                           names end with any of the given suffixes.

    :type number_of_newer_versions: int
    :param number_of_newer_versions: (Optional) Apply rule action to versioned
                                     items having N newer versions.

    :type days_since_custom_time: int
    :param days_since_custom_time: (Optional) Apply rule action to items whose number of
                                   days elapsed since the custom timestamp exceeds this
                                   value. The value of the field must be a non-negative
                                   integer. If it's zero, the object becomes eligible for
                                   lifecycle action as soon as its custom time is set.

    :type custom_time_before: :class:`datetime.date`
    :param custom_time_before: (Optional) Apply rule action to items whose custom time
                               is before this date (an RFC 3339 valid date,
                               e.g., 2019-03-16).

    :type days_since_noncurrent_time: int
    :param days_since_noncurrent_time: (Optional) Apply rule action to items whose number
                                       of days elapsed since the noncurrent timestamp
                                       exceeds this value. This condition is relevant
                                       only for versioned objects. The value of the field
                                       must be a non-negative integer. If it's zero, the
                                       object version becomes eligible for lifecycle
                                       action as soon as it becomes noncurrent.

    :type noncurrent_time_before: :class:`datetime.date`
    :param noncurrent_time_before: (Optional) Apply rule action to items whose noncurrent
                                   time is before this date (an RFC 3339 valid date,
                                   e.g., 2019-03-16). This condition is relevant only
                                   for versioned objects.

    :raises ValueError: if no arguments are passed.
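
    Example (a minimal sketch; the values shown are illustrative):

    .. code-block:: python

        # Match objects older than 30 days whose names start with "tmp/".
        conditions = LifecycleRuleConditions(age=30, matches_prefix=["tmp/"])
        assert conditions.age == 30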
214 """
215
216 def __init__(
217 self,
218 age=None,
219 created_before=None,
220 is_live=None,
221 matches_storage_class=None,
222 number_of_newer_versions=None,
223 days_since_custom_time=None,
224 custom_time_before=None,
225 days_since_noncurrent_time=None,
226 noncurrent_time_before=None,
227 matches_prefix=None,
228 matches_suffix=None,
229 _factory=False,
230 ):
231 conditions = {}
232
233 if age is not None:
234 conditions["age"] = age
235
236 if created_before is not None:
237 conditions["createdBefore"] = created_before.isoformat()
238
239 if is_live is not None:
240 conditions["isLive"] = is_live
241
242 if matches_storage_class is not None:
243 conditions["matchesStorageClass"] = matches_storage_class
244
245 if number_of_newer_versions is not None:
246 conditions["numNewerVersions"] = number_of_newer_versions
247
248 if days_since_custom_time is not None:
249 conditions["daysSinceCustomTime"] = days_since_custom_time
250
251 if custom_time_before is not None:
252 conditions["customTimeBefore"] = custom_time_before.isoformat()
253
254 if days_since_noncurrent_time is not None:
255 conditions["daysSinceNoncurrentTime"] = days_since_noncurrent_time
256
257 if noncurrent_time_before is not None:
258 conditions["noncurrentTimeBefore"] = noncurrent_time_before.isoformat()
259
260 if matches_prefix is not None:
261 conditions["matchesPrefix"] = matches_prefix
262
263 if matches_suffix is not None:
264 conditions["matchesSuffix"] = matches_suffix
265
266 if not _factory and not conditions:
267 raise ValueError("Supply at least one condition")
268
269 super(LifecycleRuleConditions, self).__init__(conditions)
270
271 @classmethod
272 def from_api_repr(cls, resource):
273 """Factory: construct instance from resource.
274
275 :type resource: dict
276 :param resource: mapping as returned from API call.
277
278 :rtype: :class:`LifecycleRuleConditions`
279 :returns: Instance created from resource.
280 """
281 instance = cls(_factory=True)
282 instance.update(resource)
283 return instance
284
    @property
    def age(self):
        """Condition's age value."""
        return self.get("age")

    @property
    def created_before(self):
        """Condition's created_before value."""
        before = self.get("createdBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)

    @property
    def is_live(self):
        """Condition's 'is_live' value."""
        return self.get("isLive")

    @property
    def matches_prefix(self):
        """Condition's 'matches_prefix' value."""
        return self.get("matchesPrefix")

    @property
    def matches_storage_class(self):
        """Condition's 'matches_storage_class' value."""
        return self.get("matchesStorageClass")

    @property
    def matches_suffix(self):
        """Condition's 'matches_suffix' value."""
        return self.get("matchesSuffix")

    @property
    def number_of_newer_versions(self):
        """Condition's 'number_of_newer_versions' value."""
        return self.get("numNewerVersions")

    @property
    def days_since_custom_time(self):
        """Condition's 'days_since_custom_time' value."""
        return self.get("daysSinceCustomTime")

    @property
    def custom_time_before(self):
        """Condition's 'custom_time_before' value."""
        before = self.get("customTimeBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)

    @property
    def days_since_noncurrent_time(self):
        """Condition's 'days_since_noncurrent_time' value."""
        return self.get("daysSinceNoncurrentTime")

    @property
    def noncurrent_time_before(self):
        """Condition's 'noncurrent_time_before' value."""
        before = self.get("noncurrentTimeBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)


class LifecycleRuleDelete(dict):
    """Map a lifecycle rule deleting matching items.

    :type kw: dict
    :param kw: arguments passed to :class:`LifecycleRuleConditions`.
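
    Example (a minimal sketch; the condition shown is illustrative):

    .. code-block:: python

        # Delete objects once they are older than 365 days.
        rule = LifecycleRuleDelete(age=365)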
352 """
353
354 def __init__(self, **kw):
355 conditions = LifecycleRuleConditions(**kw)
356 rule = {"action": {"type": "Delete"}, "condition": dict(conditions)}
357 super().__init__(rule)
358
359 @classmethod
360 def from_api_repr(cls, resource):
361 """Factory: construct instance from resource.
362
363 :type resource: dict
364 :param resource: mapping as returned from API call.
365
366 :rtype: :class:`LifecycleRuleDelete`
367 :returns: Instance created from resource.
368 """
369 instance = cls(_factory=True)
370 instance.update(resource)
371 return instance
372
373
class LifecycleRuleSetStorageClass(dict):
    """Map a lifecycle rule updating storage class of matching items.

    :type storage_class: str, one of :attr:`Bucket.STORAGE_CLASSES`.
    :param storage_class: new storage class to assign to matching items.

    :type kw: dict
    :param kw: arguments passed to :class:`LifecycleRuleConditions`.
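
    Example (a minimal sketch; the storage class and age are illustrative):

    .. code-block:: python

        # Move objects to Coldline storage once they are 90 days old.
        rule = LifecycleRuleSetStorageClass("COLDLINE", age=90)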
382 """
383
384 def __init__(self, storage_class, **kw):
385 conditions = LifecycleRuleConditions(**kw)
386 rule = {
387 "action": {"type": "SetStorageClass", "storageClass": storage_class},
388 "condition": dict(conditions),
389 }
390 super().__init__(rule)
391
392 @classmethod
393 def from_api_repr(cls, resource):
394 """Factory: construct instance from resource.
395
396 :type resource: dict
397 :param resource: mapping as returned from API call.
398
399 :rtype: :class:`LifecycleRuleSetStorageClass`
400 :returns: Instance created from resource.
401 """
402 action = resource["action"]
403 instance = cls(action["storageClass"], _factory=True)
404 instance.update(resource)
405 return instance
406
407
class LifecycleRuleAbortIncompleteMultipartUpload(dict):
    """Map a rule aborting incomplete multipart uploads of matching items.

    The "age" lifecycle condition is the only supported condition for this rule.

    :type kw: dict
    :param kw: arguments passed to :class:`LifecycleRuleConditions`.
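
    Example (a minimal sketch):

    .. code-block:: python

        # Abort multipart uploads still incomplete after 7 days.
        rule = LifecycleRuleAbortIncompleteMultipartUpload(age=7)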
415 """
416
417 def __init__(self, **kw):
418 conditions = LifecycleRuleConditions(**kw)
419 rule = {
420 "action": {"type": "AbortIncompleteMultipartUpload"},
421 "condition": dict(conditions),
422 }
423 super().__init__(rule)
424
425 @classmethod
426 def from_api_repr(cls, resource):
427 """Factory: construct instance from resource.
428
429 :type resource: dict
430 :param resource: mapping as returned from API call.
431
432 :rtype: :class:`LifecycleRuleAbortIncompleteMultipartUpload`
433 :returns: Instance created from resource.
434 """
435 instance = cls(_factory=True)
436 instance.update(resource)
437 return instance
438
439
440_default = object()
441
442
class IAMConfiguration(dict):
    """Map a bucket's IAM configuration.

    :type bucket: :class:`Bucket`
    :param bucket: Bucket for which this instance is the policy.

    :type public_access_prevention: str
    :param public_access_prevention:
        (Optional) Whether the public access prevention policy is 'inherited' (default) or 'enforced'.
        See: https://cloud.google.com/storage/docs/public-access-prevention

    :type uniform_bucket_level_access_enabled: bool
    :param uniform_bucket_level_access_enabled:
        (Optional) Whether the IAM-only policy is enabled for the bucket.

    :type uniform_bucket_level_access_locked_time: :class:`datetime.datetime`
    :param uniform_bucket_level_access_locked_time:
        (Optional) When the bucket's IAM-only policy was enabled.
        This value should normally only be set by the back-end API.

    :type bucket_policy_only_enabled: bool
    :param bucket_policy_only_enabled:
        Deprecated alias for :data:`uniform_bucket_level_access_enabled`.

    :type bucket_policy_only_locked_time: :class:`datetime.datetime`
    :param bucket_policy_only_locked_time:
        Deprecated alias for :data:`uniform_bucket_level_access_locked_time`.
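
    Example (a minimal sketch; ``bucket`` is assumed to be an existing
    :class:`Bucket` instance):

    .. code-block:: python

        config = IAMConfiguration(
            bucket, uniform_bucket_level_access_enabled=True
        )
        assert config.uniform_bucket_level_access_enabled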
470 """
471
472 def __init__(
473 self,
474 bucket,
475 public_access_prevention=_default,
476 uniform_bucket_level_access_enabled=_default,
477 uniform_bucket_level_access_locked_time=_default,
478 bucket_policy_only_enabled=_default,
479 bucket_policy_only_locked_time=_default,
480 ):
481 if bucket_policy_only_enabled is not _default:
482 if uniform_bucket_level_access_enabled is not _default:
483 raise ValueError(_UBLA_BPO_ENABLED_MESSAGE)
484
485 warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2)
486 uniform_bucket_level_access_enabled = bucket_policy_only_enabled
487
488 if bucket_policy_only_locked_time is not _default:
489 if uniform_bucket_level_access_locked_time is not _default:
490 raise ValueError(_UBLA_BPO_LOCK_TIME_MESSAGE)
491
492 warnings.warn(_BPO_LOCK_TIME_MESSAGE, DeprecationWarning, stacklevel=2)
493 uniform_bucket_level_access_locked_time = bucket_policy_only_locked_time
494
495 if uniform_bucket_level_access_enabled is _default:
496 uniform_bucket_level_access_enabled = False
497
498 if public_access_prevention is _default:
499 public_access_prevention = PUBLIC_ACCESS_PREVENTION_INHERITED
500
501 data = {
502 "uniformBucketLevelAccess": {
503 "enabled": uniform_bucket_level_access_enabled
504 },
505 "publicAccessPrevention": public_access_prevention,
506 }
507 if uniform_bucket_level_access_locked_time is not _default:
508 data["uniformBucketLevelAccess"]["lockedTime"] = _datetime_to_rfc3339(
509 uniform_bucket_level_access_locked_time
510 )
511 super(IAMConfiguration, self).__init__(data)
512 self._bucket = bucket
513
514 @classmethod
515 def from_api_repr(cls, resource, bucket):
516 """Factory: construct instance from resource.
517
518 :type bucket: :class:`Bucket`
519 :params bucket: Bucket for which this instance is the policy.
520
521 :type resource: dict
522 :param resource: mapping as returned from API call.
523
524 :rtype: :class:`IAMConfiguration`
525 :returns: Instance created from resource.
526 """
527 instance = cls(bucket)
528 instance.update(resource)
529 return instance
530
    @property
    def bucket(self):
        """Bucket for which this instance is the policy.

        :rtype: :class:`Bucket`
        :returns: the instance's bucket.
        """
        return self._bucket

    @property
    def public_access_prevention(self):
        """Setting for public access prevention policy. Options are 'inherited' (default) or 'enforced'.

        See: https://cloud.google.com/storage/docs/public-access-prevention

        :rtype: string
        :returns: the public access prevention status, either 'enforced' or 'inherited'.
        """
        return self["publicAccessPrevention"]

    @public_access_prevention.setter
    def public_access_prevention(self, value):
        self["publicAccessPrevention"] = value
        self.bucket._patch_property("iamConfiguration", self)

    @property
    def uniform_bucket_level_access_enabled(self):
        """If set, access checks only use bucket-level IAM policies or above.

        :rtype: bool
        :returns: whether the bucket is configured to allow only IAM.
        """
        ubla = self.get("uniformBucketLevelAccess", {})
        return ubla.get("enabled", False)

    @uniform_bucket_level_access_enabled.setter
    def uniform_bucket_level_access_enabled(self, value):
        ubla = self.setdefault("uniformBucketLevelAccess", {})
        ubla["enabled"] = bool(value)
        self.bucket._patch_property("iamConfiguration", self)

    @property
    def uniform_bucket_level_access_locked_time(self):
        """Deadline for changing :attr:`uniform_bucket_level_access_enabled` from true to false.

        If the bucket's :attr:`uniform_bucket_level_access_enabled` is true, this property
        is the time after which that setting becomes immutable.

        If the bucket's :attr:`uniform_bucket_level_access_enabled` is false, this property
        is ``None``.

        :rtype: Union[:class:`datetime.datetime`, None]
        :returns: (readonly) Time after which :attr:`uniform_bucket_level_access_enabled` will
                  be frozen as true.
        """
        ubla = self.get("uniformBucketLevelAccess", {})
        stamp = ubla.get("lockedTime")
        if stamp is not None:
            stamp = _rfc3339_nanos_to_datetime(stamp)
        return stamp

    @property
    def bucket_policy_only_enabled(self):
        """Deprecated alias for :attr:`uniform_bucket_level_access_enabled`.

        :rtype: bool
        :returns: whether the bucket is configured to allow only IAM.
        """
        return self.uniform_bucket_level_access_enabled

    @bucket_policy_only_enabled.setter
    def bucket_policy_only_enabled(self, value):
        warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2)
        self.uniform_bucket_level_access_enabled = value

    @property
    def bucket_policy_only_locked_time(self):
        """Deprecated alias for :attr:`uniform_bucket_level_access_locked_time`.

        :rtype: Union[:class:`datetime.datetime`, None]
        :returns:
            (readonly) Time after which :attr:`bucket_policy_only_enabled` will
            be frozen as true.
        """
        return self.uniform_bucket_level_access_locked_time


class Bucket(_PropertyMixin):
    """A class representing a Bucket on Cloud Storage.

    :type client: :class:`google.cloud.storage.client.Client`
    :param client: A client which holds credentials and project configuration
                   for the bucket (which requires a project).

    :type name: str
    :param name: The name of the bucket. Bucket names must start and end with a
                 number or letter.

    :type user_project: str
    :param user_project: (Optional) the project ID to be billed for API
                         requests made via this instance.

    :type generation: int
    :param generation: (Optional) If present, selects a specific revision of
                       this bucket.
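
    Example (a minimal sketch; the bucket name is illustrative):

    .. code-block:: python

        from google.cloud import storage

        client = storage.Client()
        bucket = client.bucket("my-bucket")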
636 """
637
638 _MAX_OBJECTS_FOR_ITERATION = 256
639 """Maximum number of existing objects allowed in iteration.
640
641 This is used in Bucket.delete() and Bucket.make_public().
642 """
643
644 STORAGE_CLASSES = (
645 STANDARD_STORAGE_CLASS,
646 NEARLINE_STORAGE_CLASS,
647 COLDLINE_STORAGE_CLASS,
648 ARCHIVE_STORAGE_CLASS,
649 MULTI_REGIONAL_LEGACY_STORAGE_CLASS, # legacy
650 REGIONAL_LEGACY_STORAGE_CLASS, # legacy
651 DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS, # legacy
652 )
653 """Allowed values for :attr:`storage_class`.
654
655 Default value is :attr:`STANDARD_STORAGE_CLASS`.
656
657 See
658 https://cloud.google.com/storage/docs/json_api/v1/buckets#storageClass
659 https://cloud.google.com/storage/docs/storage-classes
660 """
661
662 _LOCATION_TYPES = (
663 MULTI_REGION_LOCATION_TYPE,
664 REGION_LOCATION_TYPE,
665 DUAL_REGION_LOCATION_TYPE,
666 )
667 """Allowed values for :attr:`location_type`."""
668
669 def __init__(self, client, name=None, user_project=None, generation=None):
670 """
671 property :attr:`name`
672 Get the bucket's name.
673 """
674 name = _validate_name(name)
675 super(Bucket, self).__init__(name=name)
676 self._client = client
677 self._acl = BucketACL(self)
678 self._default_object_acl = DefaultObjectACL(self)
679 self._label_removals = set()
680 self._user_project = user_project
681
682 if generation is not None:
683 self._properties["generation"] = generation
684
685 def __repr__(self):
686 return f"<Bucket: {self.name}>"
687
688 @property
689 def client(self):
690 """The client bound to this bucket."""
691 return self._client
692
693 def _set_properties(self, value):
694 """Set the properties for the current object.
695
696 :type value: dict or :class:`google.cloud.storage.batch._FutureDict`
697 :param value: The properties to be set.
698 """
699 self._label_removals.clear()
700 return super(Bucket, self)._set_properties(value)
701
    @property
    def rpo(self):
        """Get the RPO (Recovery Point Objective) of this bucket.

        See: https://cloud.google.com/storage/docs/managing-turbo-replication

        :rtype: str
        :returns: "ASYNC_TURBO" or "DEFAULT"
        """
        return self._properties.get("rpo")

    @rpo.setter
    def rpo(self, value):
        """
        Set the RPO (Recovery Point Objective) of this bucket.

        See: https://cloud.google.com/storage/docs/managing-turbo-replication

        :type value: str
        :param value: "ASYNC_TURBO" or "DEFAULT"
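
        Example (a sketch; assumes the ``RPO_ASYNC_TURBO`` constant exported
        by :mod:`google.cloud.storage.constants`):

        .. code-block:: python

            from google.cloud.storage.constants import RPO_ASYNC_TURBO

            bucket.rpo = RPO_ASYNC_TURBO
            bucket.patch()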
722 """
723 self._patch_property("rpo", value)
724
    @property
    def user_project(self):
        """Project ID to be billed for API requests made via this bucket.

        If unset, API requests are billed to the bucket owner.

        A user project is required for all operations on Requester Pays buckets.

        See https://cloud.google.com/storage/docs/requester-pays#requirements for details.

        :rtype: str
        """
        return self._user_project

    @property
    def generation(self):
        """Retrieve the generation for the bucket.

        :rtype: int or ``NoneType``
        :returns: The generation of the bucket or ``None`` if the bucket's
                  resource has not been loaded from the server.
        """
        generation = self._properties.get("generation")
        if generation is not None:
            return int(generation)

    @property
    def soft_delete_time(self):
        """If this bucket has been soft-deleted, returns the time at which it became soft-deleted.

        :rtype: :class:`datetime.datetime` or ``NoneType``
        :returns:
            (readonly) The time that the bucket became soft-deleted.
            Note this property is only set for soft-deleted buckets.
        """
        soft_delete_time = self._properties.get("softDeleteTime")
        if soft_delete_time is not None:
            return _rfc3339_nanos_to_datetime(soft_delete_time)

    @property
    def hard_delete_time(self):
        """If this bucket has been soft-deleted, returns the time at which it will be permanently deleted.

        :rtype: :class:`datetime.datetime` or ``NoneType``
        :returns:
            (readonly) The time that the bucket will be permanently deleted.
            Note this property is only set for soft-deleted buckets.
        """
        hard_delete_time = self._properties.get("hardDeleteTime")
        if hard_delete_time is not None:
            return _rfc3339_nanos_to_datetime(hard_delete_time)

    @property
    def _query_params(self):
        """Default query parameters."""
        params = super()._query_params
        return params

    @classmethod
    def from_uri(cls, uri, client=None):
        """Construct a bucket instance from a ``gs://`` URI.

        .. code-block:: python

            from google.cloud import storage
            from google.cloud.storage.bucket import Bucket
            client = storage.Client()
            bucket = Bucket.from_uri("gs://bucket", client=client)

        :type uri: str
        :param uri: The ``gs://`` URI of the bucket.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. Application code should
                       *always* pass ``client``.

        :rtype: :class:`google.cloud.storage.bucket.Bucket`
        :returns: The bucket object created.
        """
        scheme, netloc, path, query, frag = urlsplit(uri)

        if scheme != "gs":
            raise ValueError("URI scheme must be gs")

        return cls(client, name=netloc)

    @classmethod
    def from_string(cls, uri, client=None):
        """Construct a bucket instance from a ``gs://`` URI.

        .. note::
            Deprecated alias for :meth:`from_uri`.

        .. code-block:: python

            from google.cloud import storage
            from google.cloud.storage.bucket import Bucket
            client = storage.Client()
            bucket = Bucket.from_string("gs://bucket", client=client)

        :type uri: str
        :param uri: The ``gs://`` URI of the bucket.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. Application code should
                       *always* pass ``client``.

        :rtype: :class:`google.cloud.storage.bucket.Bucket`
        :returns: The bucket object created.
        """
        warnings.warn(_FROM_STRING_MESSAGE, PendingDeprecationWarning, stacklevel=2)
        return Bucket.from_uri(uri=uri, client=client)

    def blob(
        self,
        blob_name,
        chunk_size=None,
        encryption_key=None,
        kms_key_name=None,
        generation=None,
    ):
        """Factory constructor for blob object.

        .. note::
            This will not make an HTTP request; it simply instantiates
            a blob object owned by this bucket.

        :type blob_name: str
        :param blob_name: The name of the blob to be instantiated.

        :type chunk_size: int
        :param chunk_size: The size of a chunk of data whenever iterating
                           (in bytes). This must be a multiple of 256 KB per
                           the API specification.

        :type encryption_key: bytes
        :param encryption_key:
            (Optional) 32 byte encryption key for customer-supplied encryption.

        :type kms_key_name: str
        :param kms_key_name:
            (Optional) Resource name of KMS key used to encrypt blob's content.

        :type generation: long
        :param generation: (Optional) If present, selects a specific revision of
                           this object.

        :rtype: :class:`google.cloud.storage.blob.Blob`
        :returns: The blob object created.
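
        Example (a sketch; the blob name is illustrative):

        .. code-block:: python

            blob = bucket.blob("path/to/object.txt")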
876 """
877 return Blob(
878 name=blob_name,
879 bucket=self,
880 chunk_size=chunk_size,
881 encryption_key=encryption_key,
882 kms_key_name=kms_key_name,
883 generation=generation,
884 )
885
886 def notification(
887 self,
888 topic_name=None,
889 topic_project=None,
890 custom_attributes=None,
891 event_types=None,
892 blob_name_prefix=None,
893 payload_format=NONE_PAYLOAD_FORMAT,
894 notification_id=None,
895 ):
896 """Factory: create a notification resource for the bucket.
897
898 See: :class:`.BucketNotification` for parameters.
899
900 :rtype: :class:`.BucketNotification`
901 """
902 return BucketNotification(
903 self,
904 topic_name=topic_name,
905 topic_project=topic_project,
906 custom_attributes=custom_attributes,
907 event_types=event_types,
908 blob_name_prefix=blob_name_prefix,
909 payload_format=payload_format,
910 notification_id=notification_id,
911 )
912
    def exists(
        self,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_etag_match=None,
        if_etag_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY,
    ):
        """Determines whether or not this bucket exists.

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_etag_match: Union[str, Set[str]]
        :param if_etag_match: (Optional) Make the operation conditional on whether the
                              bucket's current ETag matches the given value.

        :type if_etag_not_match: Union[str, Set[str]]
        :param if_etag_not_match: (Optional) Make the operation conditional on whether the
                                  bucket's current ETag does not match the given value.

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :rtype: bool
        :returns: True if the bucket exists in Cloud Storage.
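
        Example (a sketch):

        .. code-block:: python

            if not bucket.exists():
                bucket.create()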
959 """
960 with create_trace_span(name="Storage.Bucket.exists"):
961 client = self._require_client(client)
962 # We only need the status code (200 or not) so we seek to
963 # minimize the returned payload.
964 query_params = {"fields": "name"}
965
966 if self.user_project is not None:
967 query_params["userProject"] = self.user_project
968
969 _add_generation_match_parameters(
970 query_params,
971 if_metageneration_match=if_metageneration_match,
972 if_metageneration_not_match=if_metageneration_not_match,
973 )
974
975 headers = {}
976 _add_etag_match_headers(
977 headers,
978 if_etag_match=if_etag_match,
979 if_etag_not_match=if_etag_not_match,
980 )
981
982 try:
983 # We intentionally pass `_target_object=None` since fields=name
984 # would limit the local properties.
985 client._get_resource(
986 self.path,
987 query_params=query_params,
988 headers=headers,
989 timeout=timeout,
990 retry=retry,
991 _target_object=None,
992 )
993 except NotFound:
994 # NOTE: This will not fail immediately in a batch. However, when
995 # Batch.finish() is called, the resulting `NotFound` will be
996 # raised.
997 return False
998 return True
999
    def create(
        self,
        client=None,
        project=None,
        location=None,
        predefined_acl=None,
        predefined_default_object_acl=None,
        enable_object_retention=False,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
    ):
        """Creates current bucket.

        If the bucket already exists, will raise
        :class:`google.cloud.exceptions.Conflict`.

        This implements "storage.buckets.insert".

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type project: str
        :param project: (Optional) The project under which the bucket is to
                        be created. If not passed, uses the project set on
                        the client.
        :raises ValueError: if ``project`` is None and client's
                            :attr:`project` is also None.

        :type location: str
        :param location: (Optional) The location of the bucket. If not passed,
                         the default location, US, will be used. See
                         https://cloud.google.com/storage/docs/bucket-locations

        :type predefined_acl: str
        :param predefined_acl:
            (Optional) Name of predefined ACL to apply to bucket. See:
            https://cloud.google.com/storage/docs/access-control/lists#predefined-acl

        :type predefined_default_object_acl: str
        :param predefined_default_object_acl:
            (Optional) Name of predefined ACL to apply to bucket's objects. See:
            https://cloud.google.com/storage/docs/access-control/lists#predefined-acl

        :type enable_object_retention: bool
        :param enable_object_retention:
            (Optional) Whether object retention should be enabled on this bucket. See:
            https://cloud.google.com/storage/docs/object-lock

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
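
        Example (a sketch; the bucket name and location are illustrative):

        .. code-block:: python

            bucket = client.bucket("my-new-bucket")
            bucket.create(location="US-EAST1")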
1060 """
1061 with create_trace_span(name="Storage.Bucket.create"):
1062 client = self._require_client(client)
1063 client.create_bucket(
1064 bucket_or_name=self,
1065 project=project,
1066 user_project=self.user_project,
1067 location=location,
1068 predefined_acl=predefined_acl,
1069 predefined_default_object_acl=predefined_default_object_acl,
1070 enable_object_retention=enable_object_retention,
1071 timeout=timeout,
1072 retry=retry,
1073 )
1074
    def update(
        self,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
    ):
        """Sends all properties in a PUT request.

        Updates the ``_properties`` with the response from the backend.

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current object.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
        """
        with create_trace_span(name="Storage.Bucket.update"):
            super(Bucket, self).update(
                client=client,
                timeout=timeout,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                retry=retry,
            )

    def reload(
        self,
        client=None,
        projection="noAcl",
        timeout=_DEFAULT_TIMEOUT,
        if_etag_match=None,
        if_etag_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY,
        soft_deleted=None,
    ):
        """Reload properties from Cloud Storage.

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current object.

        :type projection: str
        :param projection: (Optional) If used, must be 'full' or 'noAcl'.
                           Defaults to ``'noAcl'``. Specifies the set of
                           properties to return.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_etag_match: Union[str, Set[str]]
        :param if_etag_match: (Optional) Make the operation conditional on whether the
                              bucket's current ETag matches the given value.

        :type if_etag_not_match: Union[str, Set[str]]
        :param if_etag_not_match: (Optional) Make the operation conditional on whether the
                                  bucket's current ETag does not match the given value.

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :type soft_deleted: bool
        :param soft_deleted: (Optional) If True, looks for a soft-deleted
            bucket. Will only return the bucket metadata if the bucket exists
            and is in a soft-deleted state. The bucket ``generation`` must be
            set if ``soft_deleted`` is set to True.
            See: https://cloud.google.com/storage/docs/soft-delete
        """
        with create_trace_span(name="Storage.Bucket.reload"):
            super(Bucket, self).reload(
                client=client,
                projection=projection,
                timeout=timeout,
                if_etag_match=if_etag_match,
                if_etag_not_match=if_etag_not_match,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                retry=retry,
                soft_deleted=soft_deleted,
            )

    def patch(
        self,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
    ):
        """Sends all changed properties in a PATCH request.

        Updates the ``_properties`` with the response from the backend.

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current object.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
        """
        with create_trace_span(name="Storage.Bucket.patch"):
            # Special case: For buckets, it is possible that labels are being
            # removed; this requires special handling.
            if self._label_removals:
                self._changes.add("labels")
                self._properties.setdefault("labels", {})
                for removed_label in self._label_removals:
                    self._properties["labels"][removed_label] = None

            # Call the superclass method.
            super(Bucket, self).patch(
                client=client,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                timeout=timeout,
                retry=retry,
            )

    @property
    def acl(self):
        """Create our ACL on demand."""
        return self._acl

    @property
    def default_object_acl(self):
        """Create our defaultObjectACL on demand."""
        return self._default_object_acl

    @staticmethod
    def path_helper(bucket_name):
        """Relative URL path for a bucket.

        :type bucket_name: str
        :param bucket_name: The bucket name in the path.

        :rtype: str
        :returns: The relative URL path for ``bucket_name``.
        """
        return "/b/" + bucket_name

    @property
    def path(self):
        """The URL path to this bucket."""
        if not self.name:
            raise ValueError("Cannot determine path without bucket name.")

        return self.path_helper(self.name)

    def get_blob(
        self,
        blob_name,
        client=None,
        encryption_key=None,
        generation=None,
        if_etag_match=None,
        if_etag_not_match=None,
        if_generation_match=None,
        if_generation_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
        soft_deleted=None,
        **kwargs,
    ):
        """Get a blob object by name.

        See a [code sample](https://cloud.google.com/storage/docs/samples/storage-get-metadata#storage_get_metadata-python)
        on how to retrieve metadata of an object.

        If :attr:`user_project` is set, bills the API request to that project.

        :type blob_name: str
        :param blob_name: The name of the blob to retrieve.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type encryption_key: bytes
        :param encryption_key:
            (Optional) 32 byte encryption key for customer-supplied encryption.
            See
            https://cloud.google.com/storage/docs/encryption#customer-supplied.

        :type generation: long
        :param generation:
            (Optional) If present, selects a specific revision of this object.

        :type if_etag_match: Union[str, Set[str]]
        :param if_etag_match:
            (Optional) See :ref:`using-if-etag-match`

        :type if_etag_not_match: Union[str, Set[str]]
        :param if_etag_not_match:
            (Optional) See :ref:`using-if-etag-not-match`

        :type if_generation_match: long
        :param if_generation_match:
            (Optional) See :ref:`using-if-generation-match`

        :type if_generation_not_match: long
        :param if_generation_not_match:
            (Optional) See :ref:`using-if-generation-not-match`

        :type if_metageneration_match: long
        :param if_metageneration_match:
            (Optional) See :ref:`using-if-metageneration-match`

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match:
            (Optional) See :ref:`using-if-metageneration-not-match`

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :type soft_deleted: bool
        :param soft_deleted:
            (Optional) If True, looks for a soft-deleted object. Will only return
            the object metadata if the object exists and is in a soft-deleted state.
            Object ``generation`` is required if ``soft_deleted`` is set to True.
            See: https://cloud.google.com/storage/docs/soft-delete

        :param kwargs: Keyword arguments to pass to the
                       :class:`~google.cloud.storage.blob.Blob` constructor.

        :rtype: :class:`google.cloud.storage.blob.Blob` or None
        :returns: The blob object if it exists, otherwise None.
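
        Example (a sketch; the blob name is illustrative):

        .. code-block:: python

            blob = bucket.get_blob("path/to/object.txt")
            if blob is not None:
                print(blob.size)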
1362 """
1363 with create_trace_span(name="Storage.Bucket.getBlob"):
1364 blob = Blob(
1365 bucket=self,
1366 name=blob_name,
1367 encryption_key=encryption_key,
1368 generation=generation,
1369 **kwargs,
1370 )
1371 try:
1372 # NOTE: This will not fail immediately in a batch. However, when
1373 # Batch.finish() is called, the resulting `NotFound` will be
1374 # raised.
1375 blob.reload(
1376 client=client,
1377 timeout=timeout,
1378 if_etag_match=if_etag_match,
1379 if_etag_not_match=if_etag_not_match,
1380 if_generation_match=if_generation_match,
1381 if_generation_not_match=if_generation_not_match,
1382 if_metageneration_match=if_metageneration_match,
1383 if_metageneration_not_match=if_metageneration_not_match,
1384 retry=retry,
1385 soft_deleted=soft_deleted,
1386 )
1387 except NotFound:
1388 return None
1389 else:
1390 return blob
1391
    def list_blobs(
        self,
        max_results=None,
        page_token=None,
        prefix=None,
        delimiter=None,
        start_offset=None,
        end_offset=None,
        include_trailing_delimiter=None,
        versions=None,
        projection="noAcl",
        fields=None,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
        match_glob=None,
        include_folders_as_prefixes=None,
        soft_deleted=None,
        page_size=None,
    ):
        """Return an iterator used to find blobs in the bucket.

        If :attr:`user_project` is set, bills the API request to that project.

        :type max_results: int
        :param max_results:
            (Optional) The maximum number of blobs to return.

        :type page_token: str
        :param page_token:
            (Optional) If present, return the next batch of blobs, using the
            value, which must correspond to the ``nextPageToken`` value
            returned in the previous response. Deprecated: use the ``pages``
            property of the returned iterator instead of manually passing the
            token.

        :type prefix: str
        :param prefix: (Optional) Prefix used to filter blobs.

        :type delimiter: str
        :param delimiter: (Optional) Delimiter, used with ``prefix`` to
                          emulate hierarchy.

        :type start_offset: str
        :param start_offset:
            (Optional) Filter results to objects whose names are
            lexicographically equal to or after ``startOffset``. If
            ``endOffset`` is also set, the objects listed will have names
            between ``startOffset`` (inclusive) and ``endOffset`` (exclusive).

        :type end_offset: str
        :param end_offset:
            (Optional) Filter results to objects whose names are
            lexicographically before ``endOffset``. If ``startOffset`` is also
            set, the objects listed will have names between ``startOffset``
            (inclusive) and ``endOffset`` (exclusive).

        :type include_trailing_delimiter: bool
        :param include_trailing_delimiter:
            (Optional) If true, objects that end in exactly one instance of
            ``delimiter`` will have their metadata included in ``items`` in
            addition to ``prefixes``.

        :type versions: bool
        :param versions: (Optional) Whether object versions should be returned
                         as separate blobs.

        :type projection: str
        :param projection: (Optional) If used, must be 'full' or 'noAcl'.
                           Defaults to ``'noAcl'``. Specifies the set of
                           properties to return.

        :type fields: str
        :param fields:
            (Optional) Selector specifying which fields to include
            in a partial response. Must be a list of fields. For
            example to get a partial response with just the next
            page token and the name and language of each blob returned:
            ``'items(name,contentLanguage),nextPageToken'``.
            See: https://cloud.google.com/storage/docs/json_api/v1/parameters#fields

        :type client: :class:`~google.cloud.storage.client.Client`
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :type match_glob: str
        :param match_glob:
            (Optional) A glob pattern used to filter results (for example, foo*bar).
            The string value must be UTF-8 encoded. See:
            https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-object-glob

        :type include_folders_as_prefixes: bool
        :param include_folders_as_prefixes:
            (Optional) If true, includes Folders and Managed Folders in the set of
            ``prefixes`` returned by the query. Only applicable if ``delimiter`` is set to /.
            See: https://cloud.google.com/storage/docs/managed-folders

        :type soft_deleted: bool
        :param soft_deleted:
            (Optional) If true, only soft-deleted objects will be listed as distinct results in order of increasing
            generation number. This parameter can only be used successfully if the bucket has a soft delete policy.
            Note ``soft_deleted`` and ``versions`` cannot be set to True simultaneously. See:
            https://cloud.google.com/storage/docs/soft-delete

        :type page_size: int
        :param page_size:
            (Optional) Maximum number of blobs to return in each page.
            Defaults to a value set by the API.

        :rtype: :class:`~google.api_core.page_iterator.Iterator`
        :returns: Iterator of all :class:`~google.cloud.storage.blob.Blob`
                  in this bucket matching the arguments.
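
        Example (a sketch; the prefix is illustrative):

        .. code-block:: python

            for blob in bucket.list_blobs(prefix="logs/"):
                print(blob.name)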
1512 """
1513 with create_trace_span(name="Storage.Bucket.listBlobs"):
1514 client = self._require_client(client)
1515 return client.list_blobs(
1516 self,
1517 max_results=max_results,
1518 page_token=page_token,
1519 prefix=prefix,
1520 delimiter=delimiter,
1521 start_offset=start_offset,
1522 end_offset=end_offset,
1523 include_trailing_delimiter=include_trailing_delimiter,
1524 versions=versions,
1525 projection=projection,
1526 fields=fields,
1527 page_size=page_size,
1528 timeout=timeout,
1529 retry=retry,
1530 match_glob=match_glob,
1531 include_folders_as_prefixes=include_folders_as_prefixes,
1532 soft_deleted=soft_deleted,
1533 )
1534
    def list_notifications(
        self, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY
    ):
        """List Pub/Sub notifications for this bucket.

        See:
        https://cloud.google.com/storage/docs/json_api/v1/notifications/list

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :rtype: list of :class:`.BucketNotification`
        :returns: notification instances
        """
        with create_trace_span(name="Storage.Bucket.listNotifications"):
            client = self._require_client(client)
            path = self.path + "/notificationConfigs"
            iterator = client._list_resource(
                path,
                _item_to_notification,
                timeout=timeout,
                retry=retry,
            )
            iterator.bucket = self
            return iterator

    def get_notification(
        self,
        notification_id,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
    ):
        """Get a Pub/Sub notification for this bucket.

        See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/notifications/get)
        and a [code sample](https://cloud.google.com/storage/docs/samples/storage-print-pubsub-bucket-notification#storage_print_pubsub_bucket_notification-python).

        If :attr:`user_project` is set, bills the API request to that project.

        :type notification_id: str
        :param notification_id: The notification id to retrieve the notification configuration.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :rtype: :class:`.BucketNotification`
        :returns: notification instance.
        """
        with create_trace_span(name="Storage.Bucket.getNotification"):
            notification = self.notification(notification_id=notification_id)
            notification.reload(client=client, timeout=timeout, retry=retry)
            return notification

1611 def delete(
1612 self,
1613 force=False,
1614 client=None,
1615 if_metageneration_match=None,
1616 if_metageneration_not_match=None,
1617 timeout=_DEFAULT_TIMEOUT,
1618 retry=DEFAULT_RETRY,
1619 ):
1620 """Delete this bucket.
1621
1622 The bucket **must** be empty in order to submit a delete request. If
1623 ``force=True`` is passed, this will first attempt to delete all the
1624 objects / blobs in the bucket (i.e. try to empty the bucket).
1625
1626 If the bucket doesn't exist, this will raise
1627 :class:`google.cloud.exceptions.NotFound`. If the bucket is not empty
1628 (and ``force=False``), will raise :class:`google.cloud.exceptions.Conflict`.
1629
1630 If ``force=True`` and the bucket contains more than 256 objects / blobs
1631 this will cowardly refuse to delete the objects (or the bucket). This
1632 is to prevent accidental bucket deletion and to prevent extremely long
1633 runtime of this method. Also note that ``force=True`` is not supported
1634 in a ``Batch`` context.
1635
1636 If :attr:`user_project` is set, bills the API request to that project.
1637
1638 :type force: bool
1639 :param force: If True, empties the bucket's objects then deletes it.
1640
1641 :type client: :class:`~google.cloud.storage.client.Client` or
1642 ``NoneType``
1643 :param client: (Optional) The client to use. If not passed, falls back
1644 to the ``client`` stored on the current bucket.
1645
        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.
1653
1654 :type timeout: float or tuple
1655 :param timeout:
1656 (Optional) The amount of time, in seconds, to wait
1657 for the server response. See: :ref:`configuring_timeouts`
1658
1659 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1660 :param retry:
1661 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1662
1663 :raises: :class:`ValueError` if ``force`` is ``True`` and the bucket
1664 contains more than 256 objects / blobs.
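
        Example (a minimal sketch; the bucket name is illustrative, and the
        bucket is assumed to hold no more than 256 objects):

        >>> from google.cloud import storage
        >>> client = storage.Client()
        >>> bucket = client.get_bucket("my-bucket")  # API request
        >>> bucket.delete(force=True)  # empties the bucket, then deletes it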
1665 """
1666 with create_trace_span(name="Storage.Bucket.delete"):
1667 client = self._require_client(client)
1668 query_params = {}
1669
1670 if self.user_project is not None:
1671 query_params["userProject"] = self.user_project
1672
1673 _add_generation_match_parameters(
1674 query_params,
1675 if_metageneration_match=if_metageneration_match,
1676 if_metageneration_not_match=if_metageneration_not_match,
1677 )
1678 if force:
1679 blobs = list(
1680 self.list_blobs(
1681 max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
1682 client=client,
1683 timeout=timeout,
1684 retry=retry,
1685 versions=True,
1686 )
1687 )
1688 if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
1689 message = (
1690 "Refusing to delete bucket with more than "
1691 "%d objects. If you actually want to delete "
1692 "this bucket, please delete the objects "
1693 "yourself before calling Bucket.delete()."
1694 ) % (self._MAX_OBJECTS_FOR_ITERATION,)
1695 raise ValueError(message)
1696
1697 # Ignore 404 errors on delete.
1698 self.delete_blobs(
1699 blobs,
1700 on_error=lambda blob: None,
1701 client=client,
1702 timeout=timeout,
1703 retry=retry,
1704 preserve_generation=True,
1705 )
1706
1707 # We intentionally pass `_target_object=None` since a DELETE
1708 # request has no response value (whether in a standard request or
1709 # in a batch request).
1710 client._delete_resource(
1711 self.path,
1712 query_params=query_params,
1713 timeout=timeout,
1714 retry=retry,
1715 _target_object=None,
1716 )
1717
1718 def delete_blob(
1719 self,
1720 blob_name,
1721 client=None,
1722 generation=None,
1723 if_generation_match=None,
1724 if_generation_not_match=None,
1725 if_metageneration_match=None,
1726 if_metageneration_not_match=None,
1727 timeout=_DEFAULT_TIMEOUT,
1728 retry=DEFAULT_RETRY,
1729 ):
1730 """Deletes a blob from the current bucket.
1731
1732 If :attr:`user_project` is set, bills the API request to that project.
1733
1734 :type blob_name: str
1735 :param blob_name: A blob name to delete.
1736
1737 :type client: :class:`~google.cloud.storage.client.Client` or
1738 ``NoneType``
1739 :param client: (Optional) The client to use. If not passed, falls back
1740 to the ``client`` stored on the current bucket.
1741
1742 :type generation: long
1743 :param generation: (Optional) If present, permanently deletes a specific
1744 revision of this object.
1745
1746 :type if_generation_match: long
1747 :param if_generation_match:
1748 (Optional) See :ref:`using-if-generation-match`
1749
1750 :type if_generation_not_match: long
1751 :param if_generation_not_match:
1752 (Optional) See :ref:`using-if-generation-not-match`
1753
1754 :type if_metageneration_match: long
1755 :param if_metageneration_match:
1756 (Optional) See :ref:`using-if-metageneration-match`
1757
1758 :type if_metageneration_not_match: long
1759 :param if_metageneration_not_match:
1760 (Optional) See :ref:`using-if-metageneration-not-match`
1761
1762 :type timeout: float or tuple
1763 :param timeout:
1764 (Optional) The amount of time, in seconds, to wait
1765 for the server response. See: :ref:`configuring_timeouts`
1766
1767 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1768 :param retry: (Optional) How to retry the RPC. A None value will disable
1769 retries. A google.api_core.retry.Retry value will enable retries,
1770 and the object will define retriable response codes and errors and
1771 configure backoff and timeout options.
1772
1773 A google.cloud.storage.retry.ConditionalRetryPolicy value wraps a
1774 Retry object and activates it only if certain conditions are met.
1775 This class exists to provide safe defaults for RPC calls that are
1776 not technically safe to retry normally (due to potential data
1777 duplication or other side-effects) but become safe to retry if a
1778 condition such as if_generation_match is set.
1779
1780 See the retry.py source code and docstrings in this package
1781 (google.cloud.storage.retry) for information on retry types and how
1782 to configure them.
1783
        :raises: :class:`google.cloud.exceptions.NotFound` if the blob
                 isn't found. To suppress the exception, use
                 :meth:`delete_blobs` by passing a no-op ``on_error``
                 callback.
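
        Example (a minimal sketch; names are illustrative):

        >>> from google.cloud import storage
        >>> from google.cloud.exceptions import NotFound
        >>> client = storage.Client()
        >>> bucket = client.get_bucket("my-bucket")
        >>> try:
        ...     bucket.delete_blob("my-file.txt")
        ... except NotFound:
        ...     pass  # the blob was already gone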
1788 """
1789 with create_trace_span(name="Storage.Bucket.deleteBlob"):
1790 client = self._require_client(client)
1791 blob = Blob(blob_name, bucket=self, generation=generation)
1792
1793 query_params = copy.deepcopy(blob._query_params)
1794 _add_generation_match_parameters(
1795 query_params,
1796 if_generation_match=if_generation_match,
1797 if_generation_not_match=if_generation_not_match,
1798 if_metageneration_match=if_metageneration_match,
1799 if_metageneration_not_match=if_metageneration_not_match,
1800 )
1801 # We intentionally pass `_target_object=None` since a DELETE
1802 # request has no response value (whether in a standard request or
1803 # in a batch request).
1804 client._delete_resource(
1805 blob.path,
1806 query_params=query_params,
1807 timeout=timeout,
1808 retry=retry,
1809 _target_object=None,
1810 )
1811
1812 def delete_blobs(
1813 self,
1814 blobs,
1815 on_error=None,
1816 client=None,
1817 preserve_generation=False,
1818 timeout=_DEFAULT_TIMEOUT,
1819 if_generation_match=None,
1820 if_generation_not_match=None,
1821 if_metageneration_match=None,
1822 if_metageneration_not_match=None,
1823 retry=DEFAULT_RETRY,
1824 ):
1825 """Deletes a list of blobs from the current bucket.
1826
1827 Uses :meth:`delete_blob` to delete each individual blob.
1828
        By default, any generation information in the list of blobs is ignored, and the
        live versions of all blobs are deleted. Set ``preserve_generation`` to True
        if blob generations should instead be propagated from the list of blobs.
1832
1833 If :attr:`user_project` is set, bills the API request to that project.
1834
1835 :type blobs: list
1836 :param blobs: A list of :class:`~google.cloud.storage.blob.Blob`-s or
1837 blob names to delete.
1838
1839 :type on_error: callable
1840 :param on_error: (Optional) Takes single argument: ``blob``.
1841 Called once for each blob raising
1842 :class:`~google.cloud.exceptions.NotFound`;
1843 otherwise, the exception is propagated.
1844 Note that ``on_error`` is not supported in a ``Batch`` context.
1845
1846 :type client: :class:`~google.cloud.storage.client.Client`
1847 :param client: (Optional) The client to use. If not passed, falls back
1848 to the ``client`` stored on the current bucket.
1849
        :type preserve_generation: bool
        :param preserve_generation: (Optional) Deletes only the generation specified on the blob object,
                                    instead of the live version, if set to True. Only
                                    :class:`~google.cloud.storage.blob.Blob` objects can have their
                                    generation set in this way.
                                    Default: False.
1855
        :type if_generation_match: list of long
        :param if_generation_match:
            (Optional) See :ref:`using-if-generation-match`
            The list must match ``blobs`` item-to-item.
1861
1862 :type if_generation_not_match: list of long
1863 :param if_generation_not_match:
1864 (Optional) See :ref:`using-if-generation-not-match`
1865 The list must match ``blobs`` item-to-item.
1866
1867 :type if_metageneration_match: list of long
1868 :param if_metageneration_match:
1869 (Optional) See :ref:`using-if-metageneration-match`
1870 The list must match ``blobs`` item-to-item.
1871
1872 :type if_metageneration_not_match: list of long
1873 :param if_metageneration_not_match:
1874 (Optional) See :ref:`using-if-metageneration-not-match`
1875 The list must match ``blobs`` item-to-item.
1876
1877 :type timeout: float or tuple
1878 :param timeout:
1879 (Optional) The amount of time, in seconds, to wait
1880 for the server response. See: :ref:`configuring_timeouts`
1881
1882 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1883 :param retry: (Optional) How to retry the RPC. A None value will disable
1884 retries. A google.api_core.retry.Retry value will enable retries,
1885 and the object will define retriable response codes and errors and
1886 configure backoff and timeout options.
1887
1888 A google.cloud.storage.retry.ConditionalRetryPolicy value wraps a
1889 Retry object and activates it only if certain conditions are met.
1890 This class exists to provide safe defaults for RPC calls that are
1891 not technically safe to retry normally (due to potential data
1892 duplication or other side-effects) but become safe to retry if a
1893 condition such as if_generation_match is set.
1894
1895 See the retry.py source code and docstrings in this package
1896 (google.cloud.storage.retry) for information on retry types and how
1897 to configure them.
1898
        :raises: :class:`~google.cloud.exceptions.NotFound` (if
                 ``on_error`` is not passed).
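
        Example (a minimal sketch; bucket and blob names are illustrative):

        >>> from google.cloud import storage
        >>> client = storage.Client()
        >>> bucket = client.get_bucket("my-bucket")
        >>> bucket.delete_blobs(
        ...     ["stale-1.txt", "stale-2.txt"],
        ...     on_error=lambda blob: None,  # ignore blobs that are already gone
        ... )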
1901 """
1902 with create_trace_span(name="Storage.Bucket.deleteBlobs"):
1903 _raise_if_len_differs(
1904 len(blobs),
1905 if_generation_match=if_generation_match,
1906 if_generation_not_match=if_generation_not_match,
1907 if_metageneration_match=if_metageneration_match,
1908 if_metageneration_not_match=if_metageneration_not_match,
1909 )
1910 if_generation_match = iter(if_generation_match or [])
1911 if_generation_not_match = iter(if_generation_not_match or [])
1912 if_metageneration_match = iter(if_metageneration_match or [])
1913 if_metageneration_not_match = iter(if_metageneration_not_match or [])
1914
1915 for blob in blobs:
1916 try:
1917 blob_name = blob
1918 generation = None
1919 if not isinstance(blob_name, str):
1920 blob_name = blob.name
1921 generation = blob.generation if preserve_generation else None
1922
1923 self.delete_blob(
1924 blob_name,
1925 client=client,
1926 generation=generation,
1927 if_generation_match=next(if_generation_match, None),
1928 if_generation_not_match=next(if_generation_not_match, None),
1929 if_metageneration_match=next(if_metageneration_match, None),
1930 if_metageneration_not_match=next(
1931 if_metageneration_not_match, None
1932 ),
1933 timeout=timeout,
1934 retry=retry,
1935 )
1936 except NotFound:
1937 if on_error is not None:
1938 on_error(blob)
1939 else:
1940 raise
1941
1942 def copy_blob(
1943 self,
1944 blob,
1945 destination_bucket,
1946 new_name=None,
1947 client=None,
1948 preserve_acl=True,
1949 source_generation=None,
1950 if_generation_match=None,
1951 if_generation_not_match=None,
1952 if_metageneration_match=None,
1953 if_metageneration_not_match=None,
1954 if_source_generation_match=None,
1955 if_source_generation_not_match=None,
1956 if_source_metageneration_match=None,
1957 if_source_metageneration_not_match=None,
1958 timeout=_DEFAULT_TIMEOUT,
1959 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
1960 ):
1961 """Copy the given blob to the given bucket, optionally with a new name.
1962
1963 If :attr:`user_project` is set, bills the API request to that project.
1964
1965 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/objects/copy)
1966 and a [code sample](https://cloud.google.com/storage/docs/samples/storage-copy-file#storage_copy_file-python).
1967
1968 :type blob: :class:`google.cloud.storage.blob.Blob`
1969 :param blob: The blob to be copied.
1970
1971 :type destination_bucket: :class:`google.cloud.storage.bucket.Bucket`
1972 :param destination_bucket: The bucket into which the blob should be
1973 copied.
1974
1975 :type new_name: str
1976 :param new_name: (Optional) The new name for the copied file.
1977
1978 :type client: :class:`~google.cloud.storage.client.Client` or
1979 ``NoneType``
1980 :param client: (Optional) The client to use. If not passed, falls back
1981 to the ``client`` stored on the current bucket.
1982
1983 :type preserve_acl: bool
1984 :param preserve_acl: DEPRECATED. This argument is not functional!
1985 (Optional) Copies ACL from old blob to new blob.
1986 Default: True.
1987 Note that ``preserve_acl`` is not supported in a
1988 ``Batch`` context.
1989
1990 :type source_generation: long
1991 :param source_generation: (Optional) The generation of the blob to be
1992 copied.
1993
1994 :type if_generation_match: long
1995 :param if_generation_match:
1996 (Optional) See :ref:`using-if-generation-match`
1997 Note that the generation to be matched is that of the
1998 ``destination`` blob.
1999
2000 :type if_generation_not_match: long
2001 :param if_generation_not_match:
2002 (Optional) See :ref:`using-if-generation-not-match`
2003 Note that the generation to be matched is that of the
2004 ``destination`` blob.
2005
2006 :type if_metageneration_match: long
2007 :param if_metageneration_match:
2008 (Optional) See :ref:`using-if-metageneration-match`
2009 Note that the metageneration to be matched is that of the
2010 ``destination`` blob.
2011
2012 :type if_metageneration_not_match: long
2013 :param if_metageneration_not_match:
2014 (Optional) See :ref:`using-if-metageneration-not-match`
2015 Note that the metageneration to be matched is that of the
2016 ``destination`` blob.
2017
2018 :type if_source_generation_match: long
2019 :param if_source_generation_match:
2020 (Optional) Makes the operation conditional on whether the source
2021 object's generation matches the given value.
2022
2023 :type if_source_generation_not_match: long
2024 :param if_source_generation_not_match:
2025 (Optional) Makes the operation conditional on whether the source
2026 object's generation does not match the given value.
2027
2028 :type if_source_metageneration_match: long
2029 :param if_source_metageneration_match:
2030 (Optional) Makes the operation conditional on whether the source
2031 object's current metageneration matches the given value.
2032
2033 :type if_source_metageneration_not_match: long
2034 :param if_source_metageneration_not_match:
2035 (Optional) Makes the operation conditional on whether the source
2036 object's current metageneration does not match the given value.
2037
2038 :type timeout: float or tuple
2039 :param timeout:
2040 (Optional) The amount of time, in seconds, to wait
2041 for the server response. See: :ref:`configuring_timeouts`
2042
2043 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
2044 :param retry:
2045 (Optional) How to retry the RPC.
2046 The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, a conditional retry
2047 policy which will only enable retries if ``if_generation_match`` or ``generation``
2048 is set, in order to ensure requests are idempotent before retrying them.
2049 Change the value to ``DEFAULT_RETRY`` or another `google.api_core.retry.Retry` object
2050 to enable retries regardless of generation precondition setting.
2051 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout).
2052
2053 :rtype: :class:`google.cloud.storage.blob.Blob`
2054 :returns: The new Blob.
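
        Example (a minimal sketch; bucket and blob names are illustrative):

        >>> from google.cloud import storage
        >>> client = storage.Client()
        >>> source_bucket = client.get_bucket("my-source-bucket")
        >>> blob = source_bucket.blob("my-file.txt")
        >>> destination_bucket = client.get_bucket("my-destination-bucket")
        >>> new_blob = source_bucket.copy_blob(
        ...     blob, destination_bucket, new_name="copied-file.txt"
        ... )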
2055 """
2056 with create_trace_span(name="Storage.Bucket.copyBlob"):
2057 client = self._require_client(client)
2058 query_params = {}
2059
2060 if self.user_project is not None:
2061 query_params["userProject"] = self.user_project
2062
2063 if source_generation is not None:
2064 query_params["sourceGeneration"] = source_generation
2065
2066 _add_generation_match_parameters(
2067 query_params,
2068 if_generation_match=if_generation_match,
2069 if_generation_not_match=if_generation_not_match,
2070 if_metageneration_match=if_metageneration_match,
2071 if_metageneration_not_match=if_metageneration_not_match,
2072 if_source_generation_match=if_source_generation_match,
2073 if_source_generation_not_match=if_source_generation_not_match,
2074 if_source_metageneration_match=if_source_metageneration_match,
2075 if_source_metageneration_not_match=if_source_metageneration_not_match,
2076 )
2077
2078 if new_name is None:
2079 new_name = blob.name
2080
2081 new_blob = Blob(bucket=destination_bucket, name=new_name)
2082 api_path = blob.path + "/copyTo" + new_blob.path
2083 copy_result = client._post_resource(
2084 api_path,
2085 None,
2086 query_params=query_params,
2087 timeout=timeout,
2088 retry=retry,
2089 _target_object=new_blob,
2090 )
2091
2092 if not preserve_acl:
2093 new_blob.acl.save(acl={}, client=client, timeout=timeout)
2094
2095 new_blob._set_properties(copy_result)
2096 return new_blob
2097
2098 def rename_blob(
2099 self,
2100 blob,
2101 new_name,
2102 client=None,
2103 if_generation_match=None,
2104 if_generation_not_match=None,
2105 if_metageneration_match=None,
2106 if_metageneration_not_match=None,
2107 if_source_generation_match=None,
2108 if_source_generation_not_match=None,
2109 if_source_metageneration_match=None,
2110 if_source_metageneration_not_match=None,
2111 timeout=_DEFAULT_TIMEOUT,
2112 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
2113 ):
2114 """Rename the given blob using copy and delete operations.
2115
2116 If :attr:`user_project` is set, bills the API request to that project.
2117
2118 Effectively, copies blob to the same bucket with a new name, then
2119 deletes the blob.
2120
2121 .. warning::
2122
            This method will first duplicate the data and then delete the
            old blob. This means that renaming a very large object can be
            a (temporarily) costly and slow operation.
            If you need more control over the copy and deletion, instead
            use :meth:`Bucket.copy_blob` and
            :meth:`Blob.delete <google.cloud.storage.blob.Blob.delete>`
            directly.
2129
2130 Also note that this method is not fully supported in a
2131 ``Batch`` context.
2132
2133 :type blob: :class:`google.cloud.storage.blob.Blob`
2134 :param blob: The blob to be renamed.
2135
2136 :type new_name: str
2137 :param new_name: The new name for this blob.
2138
2139 :type client: :class:`~google.cloud.storage.client.Client` or
2140 ``NoneType``
2141 :param client: (Optional) The client to use. If not passed, falls back
2142 to the ``client`` stored on the current bucket.
2143
2144 :type if_generation_match: long
2145 :param if_generation_match:
2146 (Optional) See :ref:`using-if-generation-match`
2147 Note that the generation to be matched is that of the
2148 ``destination`` blob.
2149
2150 :type if_generation_not_match: long
2151 :param if_generation_not_match:
2152 (Optional) See :ref:`using-if-generation-not-match`
2153 Note that the generation to be matched is that of the
2154 ``destination`` blob.
2155
2156 :type if_metageneration_match: long
2157 :param if_metageneration_match:
2158 (Optional) See :ref:`using-if-metageneration-match`
2159 Note that the metageneration to be matched is that of the
2160 ``destination`` blob.
2161
2162 :type if_metageneration_not_match: long
2163 :param if_metageneration_not_match:
2164 (Optional) See :ref:`using-if-metageneration-not-match`
2165 Note that the metageneration to be matched is that of the
2166 ``destination`` blob.
2167
2168 :type if_source_generation_match: long
2169 :param if_source_generation_match:
2170 (Optional) Makes the operation conditional on whether the source
2171 object's generation matches the given value. Also used in the
2172 (implied) delete request.
2173
2174 :type if_source_generation_not_match: long
2175 :param if_source_generation_not_match:
2176 (Optional) Makes the operation conditional on whether the source
2177 object's generation does not match the given value. Also used in
2178 the (implied) delete request.
2179
2180 :type if_source_metageneration_match: long
2181 :param if_source_metageneration_match:
2182 (Optional) Makes the operation conditional on whether the source
2183 object's current metageneration matches the given value. Also used
2184 in the (implied) delete request.
2185
2186 :type if_source_metageneration_not_match: long
2187 :param if_source_metageneration_not_match:
2188 (Optional) Makes the operation conditional on whether the source
2189 object's current metageneration does not match the given value.
2190 Also used in the (implied) delete request.
2191
2192 :type timeout: float or tuple
2193 :param timeout:
2194 (Optional) The amount of time, in seconds, to wait
2195 for the server response. See: :ref:`configuring_timeouts`
2196
2197 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
2198 :param retry:
2199 (Optional) How to retry the RPC.
2200 The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, a conditional retry
2201 policy which will only enable retries if ``if_generation_match`` or ``generation``
2202 is set, in order to ensure requests are idempotent before retrying them.
2203 Change the value to ``DEFAULT_RETRY`` or another `google.api_core.retry.Retry` object
2204 to enable retries regardless of generation precondition setting.
2205 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout).
2206
2207 :rtype: :class:`Blob`
2208 :returns: The newly-renamed blob.
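
        Example (a minimal sketch; names are illustrative; the underlying copy
        and delete each issue an API request):

        >>> from google.cloud import storage
        >>> client = storage.Client()
        >>> bucket = client.get_bucket("my-bucket")
        >>> blob = bucket.blob("old-name.txt")
        >>> new_blob = bucket.rename_blob(blob, "new-name.txt")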
2209 """
2210 with create_trace_span(name="Storage.Bucket.renameBlob"):
2211 same_name = blob.name == new_name
2212
2213 new_blob = self.copy_blob(
2214 blob,
2215 self,
2216 new_name,
2217 client=client,
2218 timeout=timeout,
2219 if_generation_match=if_generation_match,
2220 if_generation_not_match=if_generation_not_match,
2221 if_metageneration_match=if_metageneration_match,
2222 if_metageneration_not_match=if_metageneration_not_match,
2223 if_source_generation_match=if_source_generation_match,
2224 if_source_generation_not_match=if_source_generation_not_match,
2225 if_source_metageneration_match=if_source_metageneration_match,
2226 if_source_metageneration_not_match=if_source_metageneration_not_match,
2227 retry=retry,
2228 )
2229
2230 if not same_name:
2231 blob.delete(
2232 client=client,
2233 timeout=timeout,
2234 if_generation_match=if_source_generation_match,
2235 if_generation_not_match=if_source_generation_not_match,
2236 if_metageneration_match=if_source_metageneration_match,
2237 if_metageneration_not_match=if_source_metageneration_not_match,
2238 retry=retry,
2239 )
2240 return new_blob
2241
2242 def move_blob(
2243 self,
2244 blob,
2245 new_name,
2246 client=None,
2247 if_generation_match=None,
2248 if_generation_not_match=None,
2249 if_metageneration_match=None,
2250 if_metageneration_not_match=None,
2251 if_source_generation_match=None,
2252 if_source_generation_not_match=None,
2253 if_source_metageneration_match=None,
2254 if_source_metageneration_not_match=None,
2255 timeout=_DEFAULT_TIMEOUT,
2256 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
2257 ):
2258 """Move a blob to a new name atomically.
2259
2260 If :attr:`user_project` is set on the bucket, bills the API request to that project.
2261
2262 :type blob: :class:`google.cloud.storage.blob.Blob`
2263 :param blob: The blob to be renamed.
2264
2265 :type new_name: str
2266 :param new_name: The new name for this blob.
2267
2268 :type client: :class:`~google.cloud.storage.client.Client` or
2269 ``NoneType``
2270 :param client: (Optional) The client to use. If not passed, falls back
2271 to the ``client`` stored on the current bucket.
2272
2273 :type if_generation_match: int
2274 :param if_generation_match:
2275 (Optional) See :ref:`using-if-generation-match`
2276 Note that the generation to be matched is that of the
2277 ``destination`` blob.
2278
2279 :type if_generation_not_match: int
2280 :param if_generation_not_match:
2281 (Optional) See :ref:`using-if-generation-not-match`
2282 Note that the generation to be matched is that of the
2283 ``destination`` blob.
2284
2285 :type if_metageneration_match: int
2286 :param if_metageneration_match:
2287 (Optional) See :ref:`using-if-metageneration-match`
2288 Note that the metageneration to be matched is that of the
2289 ``destination`` blob.
2290
2291 :type if_metageneration_not_match: int
2292 :param if_metageneration_not_match:
2293 (Optional) See :ref:`using-if-metageneration-not-match`
2294 Note that the metageneration to be matched is that of the
2295 ``destination`` blob.
2296
2297 :type if_source_generation_match: int
2298 :param if_source_generation_match:
2299 (Optional) Makes the operation conditional on whether the source
2300 object's generation matches the given value.
2301
2302 :type if_source_generation_not_match: int
2303 :param if_source_generation_not_match:
2304 (Optional) Makes the operation conditional on whether the source
2305 object's generation does not match the given value.
2306
2307 :type if_source_metageneration_match: int
2308 :param if_source_metageneration_match:
2309 (Optional) Makes the operation conditional on whether the source
2310 object's current metageneration matches the given value.
2311
2312 :type if_source_metageneration_not_match: int
2313 :param if_source_metageneration_not_match:
2314 (Optional) Makes the operation conditional on whether the source
2315 object's current metageneration does not match the given value.
2316
2317 :type timeout: float or tuple
2318 :param timeout:
2319 (Optional) The amount of time, in seconds, to wait
2320 for the server response. See: :ref:`configuring_timeouts`
2321
        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
2323 :param retry:
2324 (Optional) How to retry the RPC.
2325 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout).
2326
2327 :rtype: :class:`Blob`
2328 :returns: The newly-moved blob.
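
        Example (a minimal sketch; names are illustrative, and the bucket is
        assumed to support the atomic move operation):

        >>> from google.cloud import storage
        >>> client = storage.Client()
        >>> bucket = client.get_bucket("my-bucket")
        >>> blob = bucket.blob("old-name.txt")
        >>> moved = bucket.move_blob(blob, "new-name.txt")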
2329 """
2330 with create_trace_span(name="Storage.Bucket.moveBlob"):
2331 client = self._require_client(client)
2332 query_params = {}
2333
2334 if self.user_project is not None:
2335 query_params["userProject"] = self.user_project
2336
2337 _add_generation_match_parameters(
2338 query_params,
2339 if_generation_match=if_generation_match,
2340 if_generation_not_match=if_generation_not_match,
2341 if_metageneration_match=if_metageneration_match,
2342 if_metageneration_not_match=if_metageneration_not_match,
2343 if_source_generation_match=if_source_generation_match,
2344 if_source_generation_not_match=if_source_generation_not_match,
2345 if_source_metageneration_match=if_source_metageneration_match,
2346 if_source_metageneration_not_match=if_source_metageneration_not_match,
2347 )
2348
2349 new_blob = Blob(bucket=self, name=new_name)
2350 api_path = blob.path + "/moveTo/o/" + new_blob.name
2351 move_result = client._post_resource(
2352 api_path,
2353 None,
2354 query_params=query_params,
2355 timeout=timeout,
2356 retry=retry,
2357 _target_object=new_blob,
2358 )
2359
2360 new_blob._set_properties(move_result)
2361 return new_blob
2362
2363 def restore_blob(
2364 self,
2365 blob_name,
2366 client=None,
2367 generation=None,
2368 copy_source_acl=None,
2369 projection=None,
2370 if_generation_match=None,
2371 if_generation_not_match=None,
2372 if_metageneration_match=None,
2373 if_metageneration_not_match=None,
2374 timeout=_DEFAULT_TIMEOUT,
2375 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
2376 ):
2377 """Restores a soft-deleted object.
2378
2379 If :attr:`user_project` is set on the bucket, bills the API request to that project.
2380
2381 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/objects/restore)
2382
2383 :type blob_name: str
2384 :param blob_name: The name of the blob to be restored.
2385
2386 :type client: :class:`~google.cloud.storage.client.Client`
2387 :param client: (Optional) The client to use. If not passed, falls back
2388 to the ``client`` stored on the current bucket.
2389
2390 :type generation: int
2391 :param generation: Selects the specific revision of the object.
2392
2393 :type copy_source_acl: bool
2394 :param copy_source_acl: (Optional) If true, copy the soft-deleted object's access controls.
2395
2396 :type projection: str
2397 :param projection: (Optional) Specifies the set of properties to return.
2398 If used, must be 'full' or 'noAcl'.
2399
2400 :type if_generation_match: long
2401 :param if_generation_match:
2402 (Optional) See :ref:`using-if-generation-match`
2403
2404 :type if_generation_not_match: long
2405 :param if_generation_not_match:
2406 (Optional) See :ref:`using-if-generation-not-match`
2407
2408 :type if_metageneration_match: long
2409 :param if_metageneration_match:
2410 (Optional) See :ref:`using-if-metageneration-match`
2411
2412 :type if_metageneration_not_match: long
2413 :param if_metageneration_not_match:
2414 (Optional) See :ref:`using-if-metageneration-not-match`
2415
2416 :type timeout: float or tuple
2417 :param timeout:
2418 (Optional) The amount of time, in seconds, to wait
2419 for the server response. See: :ref:`configuring_timeouts`
2420
2421 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
2422 :param retry:
2423 (Optional) How to retry the RPC.
            The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, meaning
            that only restore operations with ``if_generation_match`` or ``generation``
            set will be retried.
2427
2428 Users can configure non-default retry behavior. A ``None`` value will
2429 disable retries. A ``DEFAULT_RETRY`` value will enable retries
2430 even if restore operations are not guaranteed to be idempotent.
2431 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout).
2432
2433 :rtype: :class:`google.cloud.storage.blob.Blob`
2434 :returns: The restored Blob.
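
        Example (a minimal sketch; names are illustrative, and the bucket is
        assumed to have soft delete enabled, with ``generation`` identifying a
        soft-deleted revision):

        >>> from google.cloud import storage
        >>> client = storage.Client()
        >>> bucket = client.get_bucket("my-bucket")
        >>> restored = bucket.restore_blob("my-file.txt", generation=123456)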
2435 """
2436 with create_trace_span(name="Storage.Bucket.restore_blob"):
2437 client = self._require_client(client)
2438 query_params = {}
2439
2440 if self.user_project is not None:
2441 query_params["userProject"] = self.user_project
2442 if generation is not None:
2443 query_params["generation"] = generation
2444 if copy_source_acl is not None:
2445 query_params["copySourceAcl"] = copy_source_acl
2446 if projection is not None:
2447 query_params["projection"] = projection
2448
2449 _add_generation_match_parameters(
2450 query_params,
2451 if_generation_match=if_generation_match,
2452 if_generation_not_match=if_generation_not_match,
2453 if_metageneration_match=if_metageneration_match,
2454 if_metageneration_not_match=if_metageneration_not_match,
2455 )
2456
2457 blob = Blob(bucket=self, name=blob_name)
2458 api_response = client._post_resource(
2459 f"{blob.path}/restore",
2460 None,
2461 query_params=query_params,
2462 timeout=timeout,
2463 retry=retry,
2464 )
2465 blob._set_properties(api_response)
2466 return blob
2467
2468 @property
2469 def cors(self):
2470 """Retrieve or set CORS policies configured for this bucket.
2471
2472 See http://www.w3.org/TR/cors/ and
2473 https://cloud.google.com/storage/docs/json_api/v1/buckets
2474
2475 .. note::
2476
2477 The getter for this property returns a list which contains
2478 *copies* of the bucket's CORS policy mappings. Mutating the list
2479 or one of its dicts has no effect unless you then re-assign the
2480 dict via the setter. E.g.:
2481
2482 >>> policies = bucket.cors
2483 >>> policies.append({'origin': '/foo', ...})
2484 >>> policies[1]['maxAgeSeconds'] = 3600
2485 >>> del policies[0]
2486 >>> bucket.cors = policies
2487 >>> bucket.update()
2488
2489 :setter: Set CORS policies for this bucket.
2490 :getter: Gets the CORS policies for this bucket.
2491
2492 :rtype: list of dictionaries
2493 :returns: A sequence of mappings describing each CORS policy.
2494 """
2495 return [copy.deepcopy(policy) for policy in self._properties.get("cors", ())]
2496
2497 @cors.setter
2498 def cors(self, entries):
2499 """Set CORS policies configured for this bucket.
2500
2501 See http://www.w3.org/TR/cors/ and
2502 https://cloud.google.com/storage/docs/json_api/v1/buckets
2503
2504 :type entries: list of dictionaries
2505 :param entries: A sequence of mappings describing each CORS policy.
2506 """
2507 self._patch_property("cors", entries)
2508
2509 default_event_based_hold = _scalar_property("defaultEventBasedHold")
    """Are uploaded objects automatically placed under an event-based hold?

    If True, uploaded objects will be placed under an event-based hold to
    be released at a future time. When released, an object will then begin
    the retention period determined by the policy retention period for the
    object's bucket.
2516
2517 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2518
2519 If the property is not set locally, returns ``None``.
2520
2521 :rtype: bool or ``NoneType``
2522 """
2523
2524 @property
2525 def default_kms_key_name(self):
2526 """Retrieve / set default KMS encryption key for objects in the bucket.
2527
2528 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2529
2530 :setter: Set default KMS encryption key for items in this bucket.
2531 :getter: Get default KMS encryption key for items in this bucket.
2532
2533 :rtype: str
2534 :returns: Default KMS encryption key, or ``None`` if not set.
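
        Example (a minimal sketch; the project, key ring, and key names are
        illustrative):

        >>> key_name = (
        ...     "projects/my-project/locations/us/keyRings/my-ring"
        ...     "/cryptoKeys/my-key"
        ... )
        >>> bucket.default_kms_key_name = key_name
        >>> bucket.patch()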
2535 """
2536 encryption_config = self._properties.get("encryption", {})
2537 return encryption_config.get("defaultKmsKeyName")
2538
2539 @default_kms_key_name.setter
2540 def default_kms_key_name(self, value):
2541 """Set default KMS encryption key for objects in the bucket.
2542
2543 :type value: str or None
2544 :param value: new KMS key name (None to clear any existing key).
2545 """
2546 encryption_config = self._properties.get("encryption", {})
2547 encryption_config["defaultKmsKeyName"] = value
2548 self._patch_property("encryption", encryption_config)
2549
2550 @property
2551 def labels(self):
2552 """Retrieve or set labels assigned to this bucket.
2553
2554 See
2555 https://cloud.google.com/storage/docs/json_api/v1/buckets#labels
2556
2557 .. note::
2558
2559 The getter for this property returns a dict which is a *copy*
2560 of the bucket's labels. Mutating that dict has no effect unless
2561 you then re-assign the dict via the setter. E.g.:
2562
2563 >>> labels = bucket.labels
2564 >>> labels['new_key'] = 'some-label'
2565 >>> del labels['old_key']
2566 >>> bucket.labels = labels
2567 >>> bucket.update()
2568
2569 :setter: Set labels for this bucket.
2570 :getter: Gets the labels for this bucket.
2571
2572 :rtype: :class:`dict`
2573 :returns: Name-value pairs (string->string) labelling the bucket.
2574 """
2575 labels = self._properties.get("labels")
2576 if labels is None:
2577 return {}
2578 return copy.deepcopy(labels)
2579
2580 @labels.setter
2581 def labels(self, mapping):
2582 """Set labels assigned to this bucket.
2583
2584 See
2585 https://cloud.google.com/storage/docs/json_api/v1/buckets#labels
2586
2587 :type mapping: :class:`dict`
2588 :param mapping: Name-value pairs (string->string) labelling the bucket.
2589 """
2590 # If any labels have been expressly removed, we need to track this
2591 # so that a future .patch() call can do the correct thing.
        existing = set(self.labels.keys())
        incoming = set(mapping.keys())
2594 self._label_removals = self._label_removals.union(existing.difference(incoming))
2595 mapping = {k: str(v) for k, v in mapping.items()}
2596
2597 # Actually update the labels on the object.
2598 self._patch_property("labels", copy.deepcopy(mapping))
2599
2600 @property
2601 def etag(self):
2602 """Retrieve the ETag for the bucket.
2603
2604 See https://tools.ietf.org/html/rfc2616#section-3.11 and
2605 https://cloud.google.com/storage/docs/json_api/v1/buckets
2606
2607 :rtype: str or ``NoneType``
2608 :returns: The bucket etag or ``None`` if the bucket's
2609 resource has not been loaded from the server.
2610 """
2611 return self._properties.get("etag")
2612
2613 @property
2614 def id(self):
2615 """Retrieve the ID for the bucket.
2616
2617 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2618
2619 :rtype: str or ``NoneType``
2620 :returns: The ID of the bucket or ``None`` if the bucket's
2621 resource has not been loaded from the server.
2622 """
2623 return self._properties.get("id")
2624
2625 @property
2626 def iam_configuration(self):
2627 """Retrieve IAM configuration for this bucket.
2628
2629 :rtype: :class:`IAMConfiguration`
2630 :returns: an instance for managing the bucket's IAM configuration.
2631 """
2632 info = self._properties.get("iamConfiguration", {})
2633 return IAMConfiguration.from_api_repr(info, self)
2634
2635 @property
2636 def soft_delete_policy(self):
2637 """Retrieve the soft delete policy for this bucket.
2638
2639 See https://cloud.google.com/storage/docs/soft-delete
2640
2641 :rtype: :class:`SoftDeletePolicy`
2642 :returns: an instance for managing the bucket's soft delete policy.
2643 """
2644 policy = self._properties.get("softDeletePolicy", {})
2645 return SoftDeletePolicy.from_api_repr(policy, self)
2646
2647 @property
2648 def lifecycle_rules(self):
2649 """Retrieve or set lifecycle rules configured for this bucket.
2650
2651 See https://cloud.google.com/storage/docs/lifecycle and
2652 https://cloud.google.com/storage/docs/json_api/v1/buckets
2653
2654 .. note::
2655
2656 The getter for this property returns a generator which yields
2657 *copies* of the bucket's lifecycle rules mappings. Mutating the
2658 output dicts has no effect unless you then re-assign the dict via
2659 the setter. E.g.:
2660
            >>> rules = list(bucket.lifecycle_rules)
            >>> rules.append({'action': {'type': 'Delete'}, 'condition': {'age': 365}})
            >>> rules[1]['condition']['age'] = 30
            >>> del rules[0]
            >>> bucket.lifecycle_rules = rules
            >>> bucket.update()
2667
2668 :setter: Set lifecycle rules for this bucket.
2669 :getter: Gets the lifecycle rules for this bucket.
2670
2671 :rtype: generator(dict)
2672 :returns: A sequence of mappings describing each lifecycle rule.
2673 """
2674 info = self._properties.get("lifecycle", {})
2675 for rule in info.get("rule", ()):
2676 action_type = rule["action"]["type"]
2677 if action_type == "Delete":
2678 yield LifecycleRuleDelete.from_api_repr(rule)
2679 elif action_type == "SetStorageClass":
2680 yield LifecycleRuleSetStorageClass.from_api_repr(rule)
2681 elif action_type == "AbortIncompleteMultipartUpload":
2682 yield LifecycleRuleAbortIncompleteMultipartUpload.from_api_repr(rule)
2683 else:
2684 warnings.warn(
2685 "Unknown lifecycle rule type received: {}. Please upgrade to the latest version of google-cloud-storage.".format(
2686 rule
2687 ),
2688 UserWarning,
2689 stacklevel=1,
2690 )
2691
2692 @lifecycle_rules.setter
2693 def lifecycle_rules(self, rules):
2694 """Set lifecycle rules configured for this bucket.
2695
2696 See https://cloud.google.com/storage/docs/lifecycle and
2697 https://cloud.google.com/storage/docs/json_api/v1/buckets
2698
2699 :type rules: list of dictionaries
2700 :param rules: A sequence of mappings describing each lifecycle rule.
2701 """
2702 rules = [dict(rule) for rule in rules] # Convert helpers if needed
2703 self._patch_property("lifecycle", {"rule": rules})
2704
2705 def clear_lifecycle_rules(self):
2706 """Clear lifecycle rules configured for this bucket.
2707
2708 See https://cloud.google.com/storage/docs/lifecycle and
2709 https://cloud.google.com/storage/docs/json_api/v1/buckets
2710 """
2711 self.lifecycle_rules = []
2712
2713 def clear_lifecyle_rules(self):
2714 """Deprecated alias for clear_lifecycle_rules."""
2715 return self.clear_lifecycle_rules()
2716
2717 def add_lifecycle_delete_rule(self, **kw):
2718 """Add a "delete" rule to lifecycle rules configured for this bucket.
2719
2720 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle),
2721 which is set on the bucket. For the general format of a lifecycle configuration, see the
2722 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets).
2723 See also a [code sample](https://cloud.google.com/storage/docs/samples/storage-enable-bucket-lifecycle-management#storage_enable_bucket_lifecycle_management-python).
2724
2725 :type kw: dict
2726 :params kw: arguments passed to :class:`LifecycleRuleConditions`.
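
        Example (a minimal sketch; the 365-day condition is illustrative):

        >>> bucket.add_lifecycle_delete_rule(age=365)
        >>> bucket.patch()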
2727 """
2728 rules = list(self.lifecycle_rules)
2729 rules.append(LifecycleRuleDelete(**kw))
2730 self.lifecycle_rules = rules
2731
2732 def add_lifecycle_set_storage_class_rule(self, storage_class, **kw):
2733 """Add a "set storage class" rule to lifecycle rules.
2734
2735 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle),
2736 which is set on the bucket. For the general format of a lifecycle configuration, see the
2737 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets).
2738
2739 :type storage_class: str, one of :attr:`STORAGE_CLASSES`.
2740 :param storage_class: new storage class to assign to matching items.
2741
2742 :type kw: dict
2743 :params kw: arguments passed to :class:`LifecycleRuleConditions`.
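
        Example (a minimal sketch; the 90-day condition is illustrative):

        >>> from google.cloud.storage import constants
        >>> bucket.add_lifecycle_set_storage_class_rule(
        ...     constants.COLDLINE_STORAGE_CLASS, age=90
        ... )
        >>> bucket.patch()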
2744 """
2745 rules = list(self.lifecycle_rules)
2746 rules.append(LifecycleRuleSetStorageClass(storage_class, **kw))
2747 self.lifecycle_rules = rules
2748
2749 def add_lifecycle_abort_incomplete_multipart_upload_rule(self, **kw):
        """Add an "abort incomplete multipart upload" rule to lifecycle rules.
2751
2752 .. note::
2753 The "age" lifecycle condition is the only supported condition
2754 for this rule.
2755
2756 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle),
2757 which is set on the bucket. For the general format of a lifecycle configuration, see the
2758 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets).
2759
2760 :type kw: dict
2761 :params kw: arguments passed to :class:`LifecycleRuleConditions`.
2762 """
2763 rules = list(self.lifecycle_rules)
2764 rules.append(LifecycleRuleAbortIncompleteMultipartUpload(**kw))
2765 self.lifecycle_rules = rules
2766
2767 _location = _scalar_property("location")
2768
2769 @property
2770 def location(self):
2771 """Retrieve location configured for this bucket.
2772
2773 See https://cloud.google.com/storage/docs/json_api/v1/buckets and
2774 https://cloud.google.com/storage/docs/locations
2775
        Returns ``None`` if the property has not been set before creation,
        or if the bucket's resource has not been loaded from the server.

        :rtype: str or ``NoneType``
2779 """
2780 return self._location
2781
2782 @location.setter
2783 def location(self, value):
2784 """(Deprecated) Set `Bucket.location`
2785
2786 This can only be set at bucket **creation** time.
2787
2788 See https://cloud.google.com/storage/docs/json_api/v1/buckets and
2789 https://cloud.google.com/storage/docs/bucket-locations
2790
2791 .. warning::
2792
2793 Assignment to 'Bucket.location' is deprecated, as it is only
2794 valid before the bucket is created. Instead, pass the location
2795 to `Bucket.create`.
2796 """
2797 warnings.warn(_LOCATION_SETTER_MESSAGE, DeprecationWarning, stacklevel=2)
2798 self._location = value
2799
2800 @property
2801 def data_locations(self):
2802 """Retrieve the list of regional locations for custom dual-region buckets.
2803
2804 See https://cloud.google.com/storage/docs/json_api/v1/buckets and
2805 https://cloud.google.com/storage/docs/locations
2806
        Returns ``None`` if the property has not been set before creation,
        if the bucket's resource has not been loaded from the server,
        or if the bucket is not a dual-region bucket.

        :rtype: list of str or ``NoneType``
2811 """
2812 custom_placement_config = self._properties.get("customPlacementConfig", {})
2813 return custom_placement_config.get("dataLocations")
2814
2815 @property
2816 def location_type(self):
2817 """Retrieve the location type for the bucket.
2818
2819 See https://cloud.google.com/storage/docs/storage-classes
2820
        :getter: Gets the location type for this bucket.
2822
2823 :rtype: str or ``NoneType``
2824 :returns:
2825 If set, one of
2826 :attr:`~google.cloud.storage.constants.MULTI_REGION_LOCATION_TYPE`,
2827 :attr:`~google.cloud.storage.constants.REGION_LOCATION_TYPE`, or
2828 :attr:`~google.cloud.storage.constants.DUAL_REGION_LOCATION_TYPE`,
2829 else ``None``.
2830 """
2831 return self._properties.get("locationType")
2832
2833 def get_logging(self):
2834 """Return info about access logging for this bucket.
2835
2836 See https://cloud.google.com/storage/docs/access-logs#status
2837
2838 :rtype: dict or None
        :returns: a dict with keys ``logBucket`` and ``logObjectPrefix``
                  (if logging is enabled), or None (if not).
2841 """
2842 info = self._properties.get("logging")
2843 return copy.deepcopy(info)
2844
2845 def enable_logging(self, bucket_name, object_prefix=""):
2846 """Enable access logging for this bucket.
2847
2848 See https://cloud.google.com/storage/docs/access-logs
2849
2850 :type bucket_name: str
2851 :param bucket_name: name of bucket in which to store access logs
2852
2853 :type object_prefix: str
2854 :param object_prefix: prefix for access log filenames
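
        Example (a minimal sketch; bucket name and prefix are illustrative):

        >>> bucket.enable_logging("my-log-bucket", object_prefix="access-logs/")
        >>> bucket.patch()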
2855 """
2856 info = {"logBucket": bucket_name, "logObjectPrefix": object_prefix}
2857 self._patch_property("logging", info)
2858
2859 def disable_logging(self):
2860 """Disable access logging for this bucket.
2861
2862 See https://cloud.google.com/storage/docs/access-logs#disabling
2863 """
2864 self._patch_property("logging", None)
2865
2866 @property
2867 def metageneration(self):
2868 """Retrieve the metageneration for the bucket.
2869
2870 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2871
2872 :rtype: int or ``NoneType``
2873 :returns: The metageneration of the bucket or ``None`` if the bucket's
2874 resource has not been loaded from the server.
2875 """
2876 metageneration = self._properties.get("metageneration")
2877 if metageneration is not None:
2878 return int(metageneration)
2879
2880 @property
2881 def owner(self):
2882 """Retrieve info about the owner of the bucket.
2883
2884 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2885
2886 :rtype: dict or ``NoneType``
2887 :returns: Mapping of owner's role/ID. Returns ``None`` if the bucket's
2888 resource has not been loaded from the server.
2889 """
2890 return copy.deepcopy(self._properties.get("owner"))
2891
2892 @property
2893 def project_number(self):
2894 """Retrieve the number of the project to which the bucket is assigned.
2895
2896 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2897
2898 :rtype: int or ``NoneType``
2899 :returns: The project number that owns the bucket or ``None`` if
2900 the bucket's resource has not been loaded from the server.
2901 """
2902 project_number = self._properties.get("projectNumber")
2903 if project_number is not None:
2904 return int(project_number)
2905
2906 @property
2907 def retention_policy_effective_time(self):
2908 """Retrieve the effective time of the bucket's retention policy.
2909
2910 :rtype: datetime.datetime or ``NoneType``
        :returns: point-in-time at which the bucket's retention policy is
2912 effective, or ``None`` if the property is not
2913 set locally.
2914 """
2915 policy = self._properties.get("retentionPolicy")
2916 if policy is not None:
2917 timestamp = policy.get("effectiveTime")
2918 if timestamp is not None:
2919 return _rfc3339_nanos_to_datetime(timestamp)
2920
2921 @property
2922 def retention_policy_locked(self):
        """Retrieve whether the bucket's retention policy is locked.
2924
2925 :rtype: bool
2926 :returns: True if the bucket's policy is locked, or else False
2927 if the policy is not locked, or the property is not
2928 set locally.
2929 """
2930 policy = self._properties.get("retentionPolicy")
2931 if policy is not None:
2932 return policy.get("isLocked")
2933
2934 @property
2935 def retention_period(self):
2936 """Retrieve or set the retention period for items in the bucket.
2937
2938 :rtype: int or ``NoneType``
2939 :returns: number of seconds to retain items after upload or release
2940 from event-based lock, or ``None`` if the property is not
2941 set locally.
2942 """
2943 policy = self._properties.get("retentionPolicy")
2944 if policy is not None:
2945 period = policy.get("retentionPeriod")
2946 if period is not None:
2947 return int(period)
2948
2949 @retention_period.setter
2950 def retention_period(self, value):
2951 """Set the retention period for items in the bucket.
2952
2953 :type value: int
2954 :param value:
2955 number of seconds to retain items after upload or release from
2956 event-based lock.
2957
2958 :raises ValueError: if the bucket's retention policy is locked.
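
        Example (a minimal sketch):

        >>> bucket.retention_period = 7 * 86400  # retain objects for seven days
        >>> bucket.patch()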
2959 """
2960 policy = self._properties.setdefault("retentionPolicy", {})
2961 if value is not None:
2962 policy["retentionPeriod"] = str(value)
2963 else:
2964 policy = None
2965 self._patch_property("retentionPolicy", policy)
2966
2967 @property
2968 def self_link(self):
2969 """Retrieve the URI for the bucket.
2970
2971 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2972
2973 :rtype: str or ``NoneType``
2974 :returns: The self link for the bucket or ``None`` if
2975 the bucket's resource has not been loaded from the server.
2976 """
2977 return self._properties.get("selfLink")
2978
2979 @property
2980 def storage_class(self):
2981 """Retrieve or set the storage class for the bucket.
2982
2983 See https://cloud.google.com/storage/docs/storage-classes
2984
2985 :setter: Set the storage class for this bucket.
        :getter: Gets the storage class for this bucket.
2987
2988 :rtype: str or ``NoneType``
2989 :returns:
2990 If set, one of
2991 :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`,
2992 :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`,
2993 :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`,
2994 :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`,
2995 :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`,
2996 :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`,
2997 or
2998 :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`,
2999 else ``None``.
3000 """
3001 return self._properties.get("storageClass")
3002
3003 @storage_class.setter
3004 def storage_class(self, value):
3005 """Set the storage class for the bucket.
3006
3007 See https://cloud.google.com/storage/docs/storage-classes
3008
3009 :type value: str
3010 :param value:
3011 One of
3012 :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`,
3013 :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`,
3014 :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`,
3015 :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`,
3016 :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`,
3017 :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`,
3018 or
            :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`.
3020 """
3021 self._patch_property("storageClass", value)
3022
3023 @property
3024 def time_created(self):
3025 """Retrieve the timestamp at which the bucket was created.
3026
3027 See https://cloud.google.com/storage/docs/json_api/v1/buckets
3028
3029 :rtype: :class:`datetime.datetime` or ``NoneType``
3030 :returns: Datetime object parsed from RFC3339 valid timestamp, or
3031 ``None`` if the bucket's resource has not been loaded
3032 from the server.
3033 """
3034 value = self._properties.get("timeCreated")
3035 if value is not None:
3036 return _rfc3339_nanos_to_datetime(value)
3037
3038 @property
3039 def updated(self):
3040 """Retrieve the timestamp at which the bucket was last updated.
3041
3042 See https://cloud.google.com/storage/docs/json_api/v1/buckets
3043
3044 :rtype: :class:`datetime.datetime` or ``NoneType``
3045 :returns: Datetime object parsed from RFC3339 valid timestamp, or
3046 ``None`` if the bucket's resource has not been loaded
3047 from the server.
3048 """
3049 value = self._properties.get("updated")
3050 if value is not None:
3051 return _rfc3339_nanos_to_datetime(value)
3052
3053 @property
3054 def versioning_enabled(self):
3055 """Is versioning enabled for this bucket?
3056
3057 See https://cloud.google.com/storage/docs/object-versioning for
3058 details.
3059
3060 :setter: Update whether versioning is enabled for this bucket.
3061 :getter: Query whether versioning is enabled for this bucket.
3062
3063 :rtype: bool
3064 :returns: True if enabled, else False.
3065 """
3066 versioning = self._properties.get("versioning", {})
3067 return versioning.get("enabled", False)
3068
3069 @versioning_enabled.setter
3070 def versioning_enabled(self, value):
3071 """Enable versioning for this bucket.
3072
3073 See https://cloud.google.com/storage/docs/object-versioning for
3074 details.
3075
3076 :type value: convertible to boolean
3077 :param value: should versioning be enabled for the bucket?
3078 """
3079 self._patch_property("versioning", {"enabled": bool(value)})
3080
3081 @property
3082 def requester_pays(self):
3083 """Does the requester pay for API requests for this bucket?
3084
3085 See https://cloud.google.com/storage/docs/requester-pays for
3086 details.
3087
3088 :setter: Update whether requester pays for this bucket.
3089 :getter: Query whether requester pays for this bucket.
3090
3091 :rtype: bool
3092 :returns: True if requester pays for API requests for the bucket,
3093 else False.
3094 """
        billing = self._properties.get("billing", {})
        return billing.get("requesterPays", False)
3097
3098 @requester_pays.setter
3099 def requester_pays(self, value):
3100 """Update whether requester pays for API requests for this bucket.
3101
3102 See https://cloud.google.com/storage/docs/using-requester-pays for
3103 details.
3104
3105 :type value: convertible to boolean
3106 :param value: should requester pay for API requests for the bucket?
3107 """
3108 self._patch_property("billing", {"requesterPays": bool(value)})
3109
3110 @property
3111 def autoclass_enabled(self):
3112 """Whether Autoclass is enabled for this bucket.
3113
3114 See https://cloud.google.com/storage/docs/using-autoclass for details.
3115
3116 :setter: Update whether autoclass is enabled for this bucket.
3117 :getter: Query whether autoclass is enabled for this bucket.
3118
3119 :rtype: bool
3120 :returns: True if enabled, else False.
3121 """
3122 autoclass = self._properties.get("autoclass", {})
3123 return autoclass.get("enabled", False)
3124
3125 @autoclass_enabled.setter
3126 def autoclass_enabled(self, value):
3127 """Enable or disable Autoclass at the bucket-level.
3128
3129 See https://cloud.google.com/storage/docs/using-autoclass for details.
3130
3131 :type value: convertible to boolean
3132 :param value: If true, enable Autoclass for this bucket.
3133 If false, disable Autoclass for this bucket.
3134 """
3135 autoclass = self._properties.get("autoclass", {})
3136 autoclass["enabled"] = bool(value)
3137 self._patch_property("autoclass", autoclass)
3138
3139 @property
3140 def autoclass_toggle_time(self):
        """Retrieve the toggle time when Autoclass was last enabled or disabled for the bucket.

        :rtype: datetime.datetime or ``NoneType``
        :returns: point-in-time at which the bucket's Autoclass setting was toggled, or ``None`` if the property is not set locally.
3144 """
3145 autoclass = self._properties.get("autoclass")
3146 if autoclass is not None:
3147 timestamp = autoclass.get("toggleTime")
3148 if timestamp is not None:
3149 return _rfc3339_nanos_to_datetime(timestamp)
3150
3151 @property
3152 def autoclass_terminal_storage_class(self):
3153 """The storage class that objects in an Autoclass bucket eventually transition to if
3154 they are not read for a certain length of time. Valid values are NEARLINE and ARCHIVE.
3155
3156 See https://cloud.google.com/storage/docs/using-autoclass for details.
3157
3158 :setter: Set the terminal storage class for Autoclass configuration.
3159 :getter: Get the terminal storage class for Autoclass configuration.
3160
3161 :rtype: str
3162 :returns: The terminal storage class if Autoclass is enabled, else ``None``.
3163 """
3164 autoclass = self._properties.get("autoclass", {})
3165 return autoclass.get("terminalStorageClass", None)
3166
3167 @autoclass_terminal_storage_class.setter
3168 def autoclass_terminal_storage_class(self, value):
3169 """The storage class that objects in an Autoclass bucket eventually transition to if
3170 they are not read for a certain length of time. Valid values are NEARLINE and ARCHIVE.
3171
3172 See https://cloud.google.com/storage/docs/using-autoclass for details.
3173
3174 :type value: str
3175 :param value: The only valid values are `"NEARLINE"` and `"ARCHIVE"`.
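
        Example:
            A minimal sketch; assumes an authenticated ``client`` and a
            hypothetical bucket ``my-bucket`` with Autoclass enabled::

                bucket = client.get_bucket("my-bucket")  # hypothetical bucket
                bucket.autoclass_terminal_storage_class = "ARCHIVE"
                bucket.patch()  # persist the change to the server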
3176 """
3177 autoclass = self._properties.get("autoclass", {})
3178 autoclass["terminalStorageClass"] = value
3179 self._patch_property("autoclass", autoclass)
3180
3181 @property
3182 def autoclass_terminal_storage_class_update_time(self):
        """The time at which the Autoclass ``terminal_storage_class`` field was last updated for this bucket.

        :rtype: datetime.datetime or ``NoneType``
        :returns: point-in-time at which the bucket's ``terminal_storage_class`` was last
            updated, or ``None`` if the property is not set locally.
3186 """
3187 autoclass = self._properties.get("autoclass")
3188 if autoclass is not None:
3189 timestamp = autoclass.get("terminalStorageClassUpdateTime")
3190 if timestamp is not None:
3191 return _rfc3339_nanos_to_datetime(timestamp)
3192
3193 @property
3194 def object_retention_mode(self):
3195 """Retrieve the object retention mode set on the bucket.
3196
        :rtype: str or ``NoneType``
        :returns: The bucket's object retention mode. When set to ``Enabled``,
            retention configurations can be set on objects in the bucket;
            ``None`` if the property is not set.
3200 """
3201 object_retention = self._properties.get("objectRetention")
3202 if object_retention is not None:
3203 return object_retention.get("mode")
3204
3205 @property
3206 def hierarchical_namespace_enabled(self):
3207 """Whether hierarchical namespace is enabled for this bucket.
3208
3209 :setter: Update whether hierarchical namespace is enabled for this bucket.
3210 :getter: Query whether hierarchical namespace is enabled for this bucket.
3211
3212 :rtype: bool
3213 :returns: True if enabled, else False.
3214 """
3215 hns = self._properties.get("hierarchicalNamespace", {})
        return hns.get("enabled", False)
3217
3218 @hierarchical_namespace_enabled.setter
3219 def hierarchical_namespace_enabled(self, value):
3220 """Enable or disable hierarchical namespace at the bucket-level.
3221
3222 :type value: convertible to boolean
3223 :param value: If true, enable hierarchical namespace for this bucket.
3224 If false, disable hierarchical namespace for this bucket.
3225
3226 .. note::
3227 To enable hierarchical namespace, you must set it at bucket creation time.
3228 Currently, hierarchical namespace configuration cannot be changed after bucket creation.
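
        Example:
            A minimal sketch; assumes an authenticated ``client`` and a
            hypothetical, still-unused bucket name. Hierarchical namespace
            requires uniform bucket-level access::

                bucket = client.bucket("my-new-bucket")  # hypothetical name
                bucket.iam_configuration.uniform_bucket_level_access_enabled = True
                bucket.hierarchical_namespace_enabled = True
                client.create_bucket(bucket)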
3229 """
3230 hns = self._properties.get("hierarchicalNamespace", {})
3231 hns["enabled"] = bool(value)
3232 self._patch_property("hierarchicalNamespace", hns)
3233
3234 def configure_website(self, main_page_suffix=None, not_found_page=None):
3235 """Configure website-related properties.
3236
3237 See https://cloud.google.com/storage/docs/static-website
3238
3239 .. note::
            This configures the bucket's website-related properties, controlling how
3241 the service behaves when accessing bucket contents as a web site.
3242 See [tutorials](https://cloud.google.com/storage/docs/hosting-static-website) and
3243 [code samples](https://cloud.google.com/storage/docs/samples/storage-define-bucket-website-configuration#storage_define_bucket_website_configuration-python)
3244 for more information.
3245
3246 :type main_page_suffix: str
3247 :param main_page_suffix: The page to use as the main page
3248 of a directory.
3249 Typically something like index.html.
3250
3251 :type not_found_page: str
3252 :param not_found_page: The file to use when a page isn't found.
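
        Example:
            A minimal sketch; assumes an authenticated ``client`` and an
            existing bucket named ``my-bucket`` (a hypothetical name)::

                bucket = client.get_bucket("my-bucket")  # hypothetical bucket
                bucket.configure_website("index.html", "404.html")
                bucket.patch()  # persist the change to the server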
3253 """
3254 data = {"mainPageSuffix": main_page_suffix, "notFoundPage": not_found_page}
3255 self._patch_property("website", data)
3256
3257 def disable_website(self):
3258 """Disable the website configuration for this bucket.
3259
3260 This is really just a shortcut for setting the website-related
3261 attributes to ``None``.
3262 """
3263 return self.configure_website(None, None)
3264
3265 def get_iam_policy(
3266 self,
3267 client=None,
3268 requested_policy_version=None,
3269 timeout=_DEFAULT_TIMEOUT,
3270 retry=DEFAULT_RETRY,
3271 ):
3272 """Retrieve the IAM policy for the bucket.
3273
3274 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy)
3275 and a [code sample](https://cloud.google.com/storage/docs/samples/storage-view-bucket-iam-members#storage_view_bucket_iam_members-python).
3276
3277 If :attr:`user_project` is set, bills the API request to that project.
3278
3279 :type client: :class:`~google.cloud.storage.client.Client` or
3280 ``NoneType``
3281 :param client: (Optional) The client to use. If not passed, falls back
3282 to the ``client`` stored on the current bucket.
3283
3284 :type requested_policy_version: int or ``NoneType``
3285 :param requested_policy_version: (Optional) The version of IAM policies to request.
3286 If a policy with a condition is requested without
3287 setting this, the server will return an error.
3288 This must be set to a value of 3 to retrieve IAM
3289 policies containing conditions. This is to prevent
3290 client code that isn't aware of IAM conditions from
3291 interpreting and modifying policies incorrectly.
3292 The service might return a policy with version lower
3293 than the one that was requested, based on the
3294 feature syntax in the policy fetched.
3295
3296 :type timeout: float or tuple
3297 :param timeout:
3298 (Optional) The amount of time, in seconds, to wait
3299 for the server response. See: :ref:`configuring_timeouts`
3300
3301 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3302 :param retry:
3303 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3304
3305 :rtype: :class:`google.api_core.iam.Policy`
3306 :returns: the policy instance, based on the resource returned from
3307 the ``getIamPolicy`` API request.
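
        Example:
            A minimal sketch; assumes an authenticated ``client`` and an
            existing bucket named ``my-bucket`` (a hypothetical name)::

                bucket = client.get_bucket("my-bucket")  # hypothetical bucket
                policy = bucket.get_iam_policy(requested_policy_version=3)
                for binding in policy.bindings:
                    print(binding["role"], binding["members"])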
3308 """
3309 with create_trace_span(name="Storage.Bucket.getIamPolicy"):
3310 client = self._require_client(client)
3311 query_params = {}
3312
3313 if self.user_project is not None:
3314 query_params["userProject"] = self.user_project
3315
3316 if requested_policy_version is not None:
3317 query_params["optionsRequestedPolicyVersion"] = requested_policy_version
3318
3319 info = client._get_resource(
3320 f"{self.path}/iam",
3321 query_params=query_params,
3322 timeout=timeout,
3323 retry=retry,
3324 _target_object=None,
3325 )
3326 return Policy.from_api_repr(info)
3327
3328 def set_iam_policy(
3329 self,
3330 policy,
3331 client=None,
3332 timeout=_DEFAULT_TIMEOUT,
3333 retry=DEFAULT_RETRY_IF_ETAG_IN_JSON,
3334 ):
3335 """Update the IAM policy for the bucket.
3336
3337 See
3338 https://cloud.google.com/storage/docs/json_api/v1/buckets/setIamPolicy
3339
3340 If :attr:`user_project` is set, bills the API request to that project.
3341
3342 :type policy: :class:`google.api_core.iam.Policy`
3343 :param policy: policy instance used to update bucket's IAM policy.
3344
3345 :type client: :class:`~google.cloud.storage.client.Client` or
3346 ``NoneType``
3347 :param client: (Optional) The client to use. If not passed, falls back
3348 to the ``client`` stored on the current bucket.
3349
3350 :type timeout: float or tuple
3351 :param timeout:
3352 (Optional) The amount of time, in seconds, to wait
3353 for the server response. See: :ref:`configuring_timeouts`
3354
3355 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3356 :param retry:
3357 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3358
3359 :rtype: :class:`google.api_core.iam.Policy`
3360 :returns: the policy instance, based on the resource returned from
3361 the ``setIamPolicy`` API request.
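
        Example:
            A minimal sketch of a read-modify-write cycle; assumes an
            authenticated ``client``, an existing bucket named
            ``my-bucket``, and a hypothetical member address::

                bucket = client.get_bucket("my-bucket")  # hypothetical bucket
                policy = bucket.get_iam_policy(requested_policy_version=3)
                policy.bindings.append(
                    {
                        "role": "roles/storage.objectViewer",
                        # hypothetical member; substitute a real principal
                        "members": ["user:example-user@example.com"],
                    }
                )
                bucket.set_iam_policy(policy)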
3362 """
3363 with create_trace_span(name="Storage.Bucket.setIamPolicy"):
3364 client = self._require_client(client)
3365 query_params = {}
3366
3367 if self.user_project is not None:
3368 query_params["userProject"] = self.user_project
3369
3370 path = f"{self.path}/iam"
3371 resource = policy.to_api_repr()
3372 resource["resourceId"] = self.path
3373
3374 info = client._put_resource(
3375 path,
3376 resource,
3377 query_params=query_params,
3378 timeout=timeout,
3379 retry=retry,
3380 _target_object=None,
3381 )
3382
3383 return Policy.from_api_repr(info)
3384
3385 def test_iam_permissions(
3386 self, permissions, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY
3387 ):
3388 """API call: test permissions
3389
3390 See
3391 https://cloud.google.com/storage/docs/json_api/v1/buckets/testIamPermissions
3392
3393 If :attr:`user_project` is set, bills the API request to that project.
3394
3395 :type permissions: list of string
3396 :param permissions: the permissions to check
3397
3398 :type client: :class:`~google.cloud.storage.client.Client` or
3399 ``NoneType``
3400 :param client: (Optional) The client to use. If not passed, falls back
3401 to the ``client`` stored on the current bucket.
3402
3403 :type timeout: float or tuple
3404 :param timeout:
3405 (Optional) The amount of time, in seconds, to wait
3406 for the server response. See: :ref:`configuring_timeouts`
3407
3408 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3409 :param retry:
3410 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3411
3412 :rtype: list of string
3413 :returns: the permissions returned by the ``testIamPermissions`` API
3414 request.
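
        Example:
            A minimal sketch; assumes an authenticated ``client`` and an
            existing bucket named ``my-bucket`` (a hypothetical name)::

                bucket = client.get_bucket("my-bucket")  # hypothetical bucket
                allowed = bucket.test_iam_permissions(
                    ["storage.buckets.get", "storage.buckets.update"]
                )
                # ``allowed`` is the subset of the requested permissions
                # that the caller holds on the bucket.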
3415 """
3416 with create_trace_span(name="Storage.Bucket.testIamPermissions"):
3417 client = self._require_client(client)
3418 query_params = {"permissions": permissions}
3419
3420 if self.user_project is not None:
3421 query_params["userProject"] = self.user_project
3422
3423 path = f"{self.path}/iam/testPermissions"
3424 resp = client._get_resource(
3425 path,
3426 query_params=query_params,
3427 timeout=timeout,
3428 retry=retry,
3429 _target_object=None,
3430 )
3431 return resp.get("permissions", [])
3432
3433 def make_public(
3434 self,
3435 recursive=False,
3436 future=False,
3437 client=None,
3438 timeout=_DEFAULT_TIMEOUT,
3439 if_metageneration_match=None,
3440 if_metageneration_not_match=None,
3441 retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
3442 ):
3443 """Update bucket's ACL, granting read access to anonymous users.
3444
3445 :type recursive: bool
3446 :param recursive: If True, this will make all blobs inside the bucket
3447 public as well.
3448
3449 :type future: bool
3450 :param future: If True, this will make all objects created in the
3451 future public as well.
3452
3453 :type client: :class:`~google.cloud.storage.client.Client` or
3454 ``NoneType``
3455 :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.
3469
3470 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3471 :param retry:
3472 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3473
3474 :raises ValueError:
3475 If ``recursive`` is True, and the bucket contains more than 256
3476 blobs. This is to prevent extremely long runtime of this
3477 method. For such buckets, iterate over the blobs returned by
3478 :meth:`list_blobs` and call
3479 :meth:`~google.cloud.storage.blob.Blob.make_public`
3480 for each blob.
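
        Example:
            A minimal sketch; assumes an authenticated ``client`` and a
            hypothetical ACL-based bucket ``my-bucket`` (the call fails on
            buckets with uniform bucket-level access enabled)::

                bucket = client.get_bucket("my-bucket")  # hypothetical bucket
                bucket.make_public(future=True)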
3481 """
3482 with create_trace_span(name="Storage.Bucket.makePublic"):
3483 self.acl.all().grant_read()
3484 self.acl.save(
3485 client=client,
3486 timeout=timeout,
3487 if_metageneration_match=if_metageneration_match,
3488 if_metageneration_not_match=if_metageneration_not_match,
3489 retry=retry,
3490 )
3491
3492 if future:
3493 doa = self.default_object_acl
3494 if not doa.loaded:
3495 doa.reload(client=client, timeout=timeout)
3496 doa.all().grant_read()
3497 doa.save(
3498 client=client,
3499 timeout=timeout,
3500 if_metageneration_match=if_metageneration_match,
3501 if_metageneration_not_match=if_metageneration_not_match,
3502 retry=retry,
3503 )
3504
3505 if recursive:
3506 blobs = list(
3507 self.list_blobs(
3508 projection="full",
3509 max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
3510 client=client,
3511 timeout=timeout,
3512 )
3513 )
3514 if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
3515 message = (
3516 "Refusing to make public recursively with more than "
3517 "%d objects. If you actually want to make every object "
3518 "in this bucket public, iterate through the blobs "
3519 "returned by 'Bucket.list_blobs()' and call "
3520 "'make_public' on each one."
3521 ) % (self._MAX_OBJECTS_FOR_ITERATION,)
3522 raise ValueError(message)
3523
3524 for blob in blobs:
3525 blob.acl.all().grant_read()
3526 blob.acl.save(
3527 client=client,
3528 timeout=timeout,
3529 )
3530
3531 def make_private(
3532 self,
3533 recursive=False,
3534 future=False,
3535 client=None,
3536 timeout=_DEFAULT_TIMEOUT,
3537 if_metageneration_match=None,
3538 if_metageneration_not_match=None,
3539 retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
3540 ):
3541 """Update bucket's ACL, revoking read access for anonymous users.
3542
3543 :type recursive: bool
3544 :param recursive: If True, this will make all blobs inside the bucket
3545 private as well.
3546
3547 :type future: bool
3548 :param future: If True, this will make all objects created in the
3549 future private as well.
3550
3551 :type client: :class:`~google.cloud.storage.client.Client` or
3552 ``NoneType``
3553 :param client: (Optional) The client to use. If not passed, falls back
3554 to the ``client`` stored on the current bucket.
3555
3556 :type timeout: float or tuple
3557 :param timeout:
3558 (Optional) The amount of time, in seconds, to wait
3559 for the server response. See: :ref:`configuring_timeouts`
3560
        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3570
3571 :raises ValueError:
3572 If ``recursive`` is True, and the bucket contains more than 256
3573 blobs. This is to prevent extremely long runtime of this
3574 method. For such buckets, iterate over the blobs returned by
3575 :meth:`list_blobs` and call
3576 :meth:`~google.cloud.storage.blob.Blob.make_private`
3577 for each blob.
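
        Example:
            A minimal sketch; assumes an authenticated ``client`` and a
            hypothetical ACL-based bucket ``my-bucket`` holding at most
            256 blobs::

                bucket = client.get_bucket("my-bucket")  # hypothetical bucket
                bucket.make_private(recursive=True)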
3578 """
3579 with create_trace_span(name="Storage.Bucket.makePrivate"):
3580 self.acl.all().revoke_read()
3581 self.acl.save(
3582 client=client,
3583 timeout=timeout,
3584 if_metageneration_match=if_metageneration_match,
3585 if_metageneration_not_match=if_metageneration_not_match,
3586 retry=retry,
3587 )
3588
3589 if future:
3590 doa = self.default_object_acl
3591 if not doa.loaded:
3592 doa.reload(client=client, timeout=timeout)
3593 doa.all().revoke_read()
3594 doa.save(
3595 client=client,
3596 timeout=timeout,
3597 if_metageneration_match=if_metageneration_match,
3598 if_metageneration_not_match=if_metageneration_not_match,
3599 retry=retry,
3600 )
3601
3602 if recursive:
3603 blobs = list(
3604 self.list_blobs(
3605 projection="full",
3606 max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
3607 client=client,
3608 timeout=timeout,
3609 )
3610 )
3611 if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
3612 message = (
3613 "Refusing to make private recursively with more than "
3614 "%d objects. If you actually want to make every object "
3615 "in this bucket private, iterate through the blobs "
3616 "returned by 'Bucket.list_blobs()' and call "
3617 "'make_private' on each one."
3618 ) % (self._MAX_OBJECTS_FOR_ITERATION,)
3619 raise ValueError(message)
3620
3621 for blob in blobs:
3622 blob.acl.all().revoke_read()
3623 blob.acl.save(client=client, timeout=timeout)
3624
3625 def generate_upload_policy(self, conditions, expiration=None, client=None):
3626 """Create a signed upload policy for uploading objects.
3627
3628 This method generates and signs a policy document. You can use
3629 [`policy documents`](https://cloud.google.com/storage/docs/xml-api/post-object-forms)
3630 to allow visitors to a website to upload files to
3631 Google Cloud Storage without giving them direct write access.
3632 See a [code sample](https://cloud.google.com/storage/docs/xml-api/post-object-forms#python).
3633
        :type conditions: list
        :param conditions: A list of conditions as described in the
                           `policy documents` documentation.

        :type expiration: datetime
        :param expiration: (Optional) Expiration in UTC. If not specified, the
                           policy will expire in 1 hour.
3641
3642 :type client: :class:`~google.cloud.storage.client.Client`
3643 :param client: (Optional) The client to use. If not passed, falls back
3644 to the ``client`` stored on the current bucket.
3645
3646 :rtype: dict
3647 :returns: A dictionary of (form field name, form field value) of form
3648 fields that should be added to your HTML upload form in order
3649 to attach the signature.
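
        Example:
            A minimal sketch; assumes an authenticated ``client`` with
            signing credentials (e.g. a service-account JSON key) and an
            existing bucket named ``my-bucket`` (a hypothetical name)::

                conditions = [["starts-with", "$key", "uploads/"]]  # hypothetical prefix
                bucket = client.get_bucket("my-bucket")  # hypothetical bucket
                fields = bucket.generate_upload_policy(conditions)
                # Render ``fields`` (plus a "key" field) as hidden inputs in
                # an HTML form that POSTs to the bucket's URL.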
3650 """
3651 client = self._require_client(client)
3652 credentials = client._credentials
3653 _signing.ensure_signed_credentials(credentials)
3654
3655 if expiration is None:
3656 expiration = _NOW(_UTC).replace(tzinfo=None) + datetime.timedelta(hours=1)
3657
3658 conditions = conditions + [{"bucket": self.name}]
3659
3660 policy_document = {
3661 "expiration": _datetime_to_rfc3339(expiration),
3662 "conditions": conditions,
3663 }
3664
3665 encoded_policy_document = base64.b64encode(
3666 json.dumps(policy_document).encode("utf-8")
3667 )
3668 signature = base64.b64encode(credentials.sign_bytes(encoded_policy_document))
3669
3670 fields = {
3671 "bucket": self.name,
3672 "GoogleAccessId": credentials.signer_email,
3673 "policy": encoded_policy_document.decode("utf-8"),
3674 "signature": signature.decode("utf-8"),
3675 }
3676
3677 return fields
3678
3679 def lock_retention_policy(
3680 self, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY
3681 ):
3682 """Lock the bucket's retention policy.
3683
3684 :type client: :class:`~google.cloud.storage.client.Client` or
3685 ``NoneType``
3686 :param client: (Optional) The client to use. If not passed, falls back
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.
3688
3689 :type timeout: float or tuple
3690 :param timeout:
3691 (Optional) The amount of time, in seconds, to wait
3692 for the server response. See: :ref:`configuring_timeouts`
3693
3694 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3695 :param retry:
3696 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3697
3698 :raises ValueError:
3699 if the bucket has no metageneration (i.e., new or never reloaded);
3700 if the bucket has no retention policy assigned;
3701 if the bucket's retention policy is already locked.
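
        Example:
            A minimal sketch; assumes an authenticated ``client`` and an
            existing bucket named ``my-bucket`` (a hypothetical name).
            Locking a retention policy is irreversible::

                bucket = client.get_bucket("my-bucket")  # hypothetical bucket
                bucket.retention_period = 24 * 60 * 60  # one day, in seconds
                bucket.patch()
                bucket.lock_retention_policy()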
3702 """
3703 with create_trace_span(name="Storage.Bucket.lockRetentionPolicy"):
            if "metageneration" not in self._properties:
                raise ValueError(
                    "Bucket has no metageneration available: try 'reload'?"
                )
3708
3709 policy = self._properties.get("retentionPolicy")
3710
3711 if policy is None:
3712 raise ValueError(
3713 "Bucket has no retention policy assigned: try 'reload'?"
3714 )
3715
3716 if policy.get("isLocked"):
3717 raise ValueError("Bucket's retention policy is already locked.")
3718
3719 client = self._require_client(client)
3720
3721 query_params = {"ifMetagenerationMatch": self.metageneration}
3722
3723 if self.user_project is not None:
3724 query_params["userProject"] = self.user_project
3725
3726 path = f"/b/{self.name}/lockRetentionPolicy"
3727 api_response = client._post_resource(
3728 path,
3729 None,
3730 query_params=query_params,
3731 timeout=timeout,
3732 retry=retry,
3733 _target_object=self,
3734 )
3735 self._set_properties(api_response)
3736
3737 def generate_signed_url(
3738 self,
3739 expiration=None,
3740 api_access_endpoint=None,
3741 method="GET",
3742 headers=None,
3743 query_parameters=None,
3744 client=None,
3745 credentials=None,
3746 version=None,
3747 virtual_hosted_style=False,
3748 bucket_bound_hostname=None,
3749 scheme="http",
3750 ):
3751 """Generates a signed URL for this bucket.
3752
3753 .. note::
3754
            If you are on Google Compute Engine, you can't generate a signed
            URL using a GCE service account. If you'd like to be able to
            generate a signed URL from GCE, you can use a standard service
            account from a JSON file rather than a GCE service account.
3759
3760 If you have a bucket that you want to allow access to for a set
3761 amount of time, you can use this method to generate a URL that
3762 is only valid within a certain time period.
3763
        If ``bucket_bound_hostname`` is set instead of ``api_access_endpoint``,
        ``https`` works only if using a ``CDN``.
3766
3767 :type expiration: Union[Integer, datetime.datetime, datetime.timedelta]
3768 :param expiration: Point in time when the signed URL should expire. If
3769 a ``datetime`` instance is passed without an explicit
3770 ``tzinfo`` set, it will be assumed to be ``UTC``.
3771
3772 :type api_access_endpoint: str
3773 :param api_access_endpoint: (Optional) URI base, for instance
3774 "https://storage.googleapis.com". If not specified, the client's
3775 api_endpoint will be used. Incompatible with bucket_bound_hostname.
3776
3777 :type method: str
3778 :param method: The HTTP verb that will be used when requesting the URL.
3779
3780 :type headers: dict
3781 :param headers:
3782 (Optional) Additional HTTP headers to be included as part of the
3783 signed URLs. See:
3784 https://cloud.google.com/storage/docs/xml-api/reference-headers
3785 Requests using the signed URL *must* pass the specified header
3786 (name and value) with each request for the URL.
3787
3788 :type query_parameters: dict
3789 :param query_parameters:
3790 (Optional) Additional query parameters to be included as part of the
3791 signed URLs. See:
3792 https://cloud.google.com/storage/docs/xml-api/reference-headers#query
3793
3794 :type client: :class:`~google.cloud.storage.client.Client` or
3795 ``NoneType``
3796 :param client: (Optional) The client to use. If not passed, falls back
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.
3798
3799 :type credentials: :class:`google.auth.credentials.Credentials` or
3800 :class:`NoneType`
3801 :param credentials: The authorization credentials to attach to requests.
3802 These credentials identify this application to the service.
3803 If none are specified, the client will attempt to ascertain
3804 the credentials from the environment.
3805
3806 :type version: str
3807 :param version: (Optional) The version of signed credential to create.
3808 Must be one of 'v2' | 'v4'.
3809
3810 :type virtual_hosted_style: bool
3811 :param virtual_hosted_style:
3812 (Optional) If true, then construct the URL relative the bucket's
3813 virtual hostname, e.g., '<bucket-name>.storage.googleapis.com'.
3814 Incompatible with bucket_bound_hostname.
3815
3816 :type bucket_bound_hostname: str
3817 :param bucket_bound_hostname:
3818 (Optional) If passed, then construct the URL relative to the bucket-bound hostname.
            Value can be a bare hostname or a hostname with a scheme, e.g.,
            'example.com' or 'http://example.com'.
3820 Incompatible with api_access_endpoint and virtual_hosted_style.
3821 See: https://cloud.google.com/storage/docs/request-endpoints#cname
3822
3823 :type scheme: str
3824 :param scheme:
3825 (Optional) If ``bucket_bound_hostname`` is passed as a bare hostname, use
3826 this value as the scheme. ``https`` will work only when using a CDN.
3827 Defaults to ``"http"``.
3828
3829 :raises: :exc:`ValueError` when version is invalid or mutually exclusive arguments are used.
3830 :raises: :exc:`TypeError` when expiration is not a valid type.
3831 :raises: :exc:`AttributeError` if credentials is not an instance
3832 of :class:`google.auth.credentials.Signing`.
3833
3834 :rtype: str
3835 :returns: A signed URL you can use to access the resource
3836 until expiration.
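
        Example:
            A minimal sketch; assumes an authenticated ``client`` with
            signing credentials and an existing bucket named ``my-bucket``
            (a hypothetical name)::

                import datetime

                bucket = client.get_bucket("my-bucket")  # hypothetical bucket
                url = bucket.generate_signed_url(
                    version="v4",
                    expiration=datetime.timedelta(hours=1),
                    method="GET",
                )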
3837 """
3838 if version is None:
3839 version = "v2"
3840 elif version not in ("v2", "v4"):
3841 raise ValueError("'version' must be either 'v2' or 'v4'")
3842
3843 if (
3844 api_access_endpoint is not None or virtual_hosted_style
3845 ) and bucket_bound_hostname:
3846 raise ValueError(
3847 "The bucket_bound_hostname argument is not compatible with "
3848 "either api_access_endpoint or virtual_hosted_style."
3849 )
3850
3851 if api_access_endpoint is None:
3852 client = self._require_client(client)
3853 api_access_endpoint = client.api_endpoint
3854
3855 # If you are on Google Compute Engine, you can't generate a signed URL
3856 # using GCE service account.
3857 # See https://github.com/googleapis/google-auth-library-python/issues/50
3858 if virtual_hosted_style:
3859 api_access_endpoint = _virtual_hosted_style_base_url(
3860 api_access_endpoint, self.name
3861 )
3862 resource = "/"
3863 elif bucket_bound_hostname:
3864 api_access_endpoint = _bucket_bound_hostname_url(
3865 bucket_bound_hostname, scheme
3866 )
3867 resource = "/"
3868 else:
3869 resource = f"/{self.name}"
3870
3871 if credentials is None:
3872 client = self._require_client(client) # May be redundant, but that's ok.
3873 credentials = client._credentials
3874
3875 if version == "v2":
3876 helper = generate_signed_url_v2
3877 else:
3878 helper = generate_signed_url_v4
3879
3880 return helper(
3881 credentials,
3882 resource=resource,
3883 expiration=expiration,
3884 api_access_endpoint=api_access_endpoint,
3885 method=method.upper(),
3886 headers=headers,
3887 query_parameters=query_parameters,
3888 )
3889
3890
3891class SoftDeletePolicy(dict):
3892 """Map a bucket's soft delete policy.
3893
3894 See https://cloud.google.com/storage/docs/soft-delete
3895
3896 :type bucket: :class:`Bucket`
3897 :param bucket: Bucket for which this instance is the policy.
3898
3899 :type retention_duration_seconds: int
3900 :param retention_duration_seconds:
3901 (Optional) The period of time in seconds that soft-deleted objects in the bucket
3902 will be retained and cannot be permanently deleted.
3903
3904 :type effective_time: :class:`datetime.datetime`
3905 :param effective_time:
3906 (Optional) When the bucket's soft delete policy is effective.
3907 This value should normally only be set by the back-end API.
3908 """
3909
3910 def __init__(self, bucket, **kw):
3911 data = {}
3912 retention_duration_seconds = kw.get("retention_duration_seconds")
3913 data["retentionDurationSeconds"] = retention_duration_seconds
3914
3915 effective_time = kw.get("effective_time")
3916 if effective_time is not None:
3917 effective_time = _datetime_to_rfc3339(effective_time)
3918 data["effectiveTime"] = effective_time
3919
3920 super().__init__(data)
3921 self._bucket = bucket
3922
3923 @classmethod
3924 def from_api_repr(cls, resource, bucket):
3925 """Factory: construct instance from resource.
3926
3927 :type resource: dict
3928 :param resource: mapping as returned from API call.
3929
3930 :type bucket: :class:`Bucket`
        :param bucket: Bucket for which this instance is the policy.
3932
3933 :rtype: :class:`SoftDeletePolicy`
3934 :returns: Instance created from resource.
3935 """
3936 instance = cls(bucket)
3937 instance.update(resource)
3938 return instance
3939
3940 @property
3941 def bucket(self):
3942 """Bucket for which this instance is the policy.
3943
3944 :rtype: :class:`Bucket`
3945 :returns: the instance's bucket.
3946 """
3947 return self._bucket
3948
3949 @property
3950 def retention_duration_seconds(self):
3951 """Get the retention duration of the bucket's soft delete policy.
3952
3953 :rtype: int or ``NoneType``
3954 :returns: The period of time in seconds that soft-deleted objects in the bucket
3955 will be retained and cannot be permanently deleted; Or ``None`` if the
3956 property is not set.
3957 """
3958 duration = self.get("retentionDurationSeconds")
3959 if duration is not None:
3960 return int(duration)
3961
3962 @retention_duration_seconds.setter
3963 def retention_duration_seconds(self, value):
3964 """Set the retention duration of the bucket's soft delete policy.
3965
3966 :type value: int
3967 :param value:
3968 The period of time in seconds that soft-deleted objects in the bucket
3969 will be retained and cannot be permanently deleted.
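
        Example:
            A minimal sketch; assumes an authenticated ``client`` and an
            existing bucket named ``my-bucket`` (a hypothetical name)::

                bucket = client.get_bucket("my-bucket")  # hypothetical bucket
                policy = bucket.soft_delete_policy
                policy.retention_duration_seconds = 7 * 24 * 60 * 60  # 7 days
                bucket.patch()  # persist the change to the server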
3970 """
3971 self["retentionDurationSeconds"] = value
3972 self.bucket._patch_property("softDeletePolicy", self)
3973
3974 @property
3975 def effective_time(self):
3976 """Get the effective time of the bucket's soft delete policy.
3977
3978 :rtype: datetime.datetime or ``NoneType``
        :returns: point-in-time at which the bucket's soft delete policy is
3980 effective, or ``None`` if the property is not set.
3981 """
3982 timestamp = self.get("effectiveTime")
3983 if timestamp is not None:
3984 return _rfc3339_nanos_to_datetime(timestamp)
3985
3986
3987def _raise_if_len_differs(expected_len, **generation_match_args):
3988 """
    Raise an error if any generation match argument
    is set and its length differs from the given value.

    :type expected_len: int
    :param expected_len: Expected argument length in case it's set.

    :type generation_match_args: dict
    :param generation_match_args: Lists whose lengths must be checked.

    :raises: :exc:`ValueError` if any argument is set but has an unexpected length.
3999 """
4000 for name, value in generation_match_args.items():
4001 if value is not None and len(value) != expected_len:
4002 raise ValueError(f"'{name}' length must be the same as 'blobs' length")