# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

15"""Create / interact with Google Cloud Storage buckets."""
16
17import base64
18import copy
19import datetime
20import json
21from urllib.parse import urlsplit
22import warnings
23
24from google.api_core import datetime_helpers
25from google.cloud._helpers import _datetime_to_rfc3339
26from google.cloud._helpers import _rfc3339_nanos_to_datetime
27from google.cloud.exceptions import NotFound
28from google.api_core.iam import Policy
29from google.cloud.storage import _signing
30from google.cloud.storage._helpers import _add_etag_match_headers
31from google.cloud.storage._helpers import _add_generation_match_parameters
32from google.cloud.storage._helpers import _NOW
33from google.cloud.storage._helpers import _PropertyMixin
34from google.cloud.storage._helpers import _UTC
35from google.cloud.storage._helpers import _scalar_property
36from google.cloud.storage._helpers import _validate_name
37from google.cloud.storage._signing import generate_signed_url_v2
38from google.cloud.storage._signing import generate_signed_url_v4
39from google.cloud.storage._helpers import _bucket_bound_hostname_url
40from google.cloud.storage._helpers import _virtual_hosted_style_base_url
41from google.cloud.storage._opentelemetry_tracing import create_trace_span
42from google.cloud.storage.acl import BucketACL
43from google.cloud.storage.acl import DefaultObjectACL
44from google.cloud.storage.blob import Blob
45from google.cloud.storage.constants import _DEFAULT_TIMEOUT
46from google.cloud.storage.constants import ARCHIVE_STORAGE_CLASS
47from google.cloud.storage.constants import COLDLINE_STORAGE_CLASS
48from google.cloud.storage.constants import DUAL_REGION_LOCATION_TYPE
49from google.cloud.storage.constants import (
50 DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS,
51)
52from google.cloud.storage.constants import MULTI_REGIONAL_LEGACY_STORAGE_CLASS
53from google.cloud.storage.constants import MULTI_REGION_LOCATION_TYPE
54from google.cloud.storage.constants import NEARLINE_STORAGE_CLASS
55from google.cloud.storage.constants import PUBLIC_ACCESS_PREVENTION_INHERITED
56from google.cloud.storage.constants import REGIONAL_LEGACY_STORAGE_CLASS
57from google.cloud.storage.constants import REGION_LOCATION_TYPE
58from google.cloud.storage.constants import STANDARD_STORAGE_CLASS
59from google.cloud.storage.ip_filter import IPFilter
60from google.cloud.storage.notification import BucketNotification
61from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT
62from google.cloud.storage.retry import DEFAULT_RETRY
63from google.cloud.storage.retry import DEFAULT_RETRY_IF_GENERATION_SPECIFIED
64from google.cloud.storage.retry import DEFAULT_RETRY_IF_ETAG_IN_JSON
65from google.cloud.storage.retry import DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED
66

_UBLA_BPO_ENABLED_MESSAGE = (
    "Pass only one of 'uniform_bucket_level_access_enabled' / "
    "'bucket_policy_only_enabled' to 'IAMConfiguration'."
)
_BPO_ENABLED_MESSAGE = (
    "'IAMConfiguration.bucket_policy_only_enabled' is deprecated. "
    "Instead, use 'IAMConfiguration.uniform_bucket_level_access_enabled'."
)
_UBLA_BPO_LOCK_TIME_MESSAGE = (
    "Pass only one of 'uniform_bucket_level_access_lock_time' / "
    "'bucket_policy_only_lock_time' to 'IAMConfiguration'."
)
_BPO_LOCK_TIME_MESSAGE = (
    "'IAMConfiguration.bucket_policy_only_lock_time' is deprecated. "
    "Instead, use 'IAMConfiguration.uniform_bucket_level_access_lock_time'."
)
_LOCATION_SETTER_MESSAGE = (
    "Assignment to 'Bucket.location' is deprecated, as it is only "
    "valid before the bucket is created. Instead, pass the location "
    "to `Bucket.create`."
)
_FROM_STRING_MESSAGE = (
    "Bucket.from_string() is deprecated. Use Bucket.from_uri() instead."
)
_IP_FILTER_PROPERTY = "ipFilter"


def _blobs_page_start(iterator, page, response):
    """Grab prefixes after a :class:`~google.api_core.page_iterator.Page` started.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type page: :class:`~google.api_core.page_iterator.Page`
    :param page: The page that was just created.

    :type response: dict
    :param response: The JSON API response for a page of blobs.
    """
    page.prefixes = tuple(response.get("prefixes", ()))
    iterator.prefixes.update(page.prefixes)


def _item_to_blob(iterator, item):
    """Convert a JSON blob to the native object.

    .. note::

        This assumes that the ``bucket`` attribute has been
        added to the iterator after being created.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that has retrieved the item.

    :type item: dict
    :param item: An item to be converted to a blob.

    :rtype: :class:`.Blob`
    :returns: The next blob in the page.
    """
    name = item.get("name")
    blob = Blob(name, bucket=iterator.bucket)
    blob._set_properties(item)
    return blob


def _item_to_notification(iterator, item):
    """Convert a JSON resource to the native notification object.

    .. note::

        This assumes that the ``bucket`` attribute has been
        added to the iterator after being created.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that has retrieved the item.

    :type item: dict
    :param item: An item to be converted to a notification.

    :rtype: :class:`.BucketNotification`
    :returns: The next notification being iterated.
    """
    return BucketNotification.from_api_repr(item, bucket=iterator.bucket)


class LifecycleRuleConditions(dict):
    """Map a single lifecycle rule for a bucket.

    See: https://cloud.google.com/storage/docs/lifecycle

    :type age: int
    :param age: (Optional) Apply rule action to items whose age, in days,
                exceeds this value.

    :type created_before: datetime.date
    :param created_before: (Optional) Apply rule action to items created
                           before this date.

    :type is_live: bool
    :param is_live: (Optional) If true, apply rule action to non-versioned
                    items, or to items with no newer versions. If false, apply
                    rule action to versioned items with at least one newer
                    version.

    :type matches_prefix: list(str)
    :param matches_prefix: (Optional) Apply rule action to items whose
                           name begins with any of the given prefixes.

    :type matches_storage_class: list(str), one or more of
                                 :attr:`Bucket.STORAGE_CLASSES`.
    :param matches_storage_class: (Optional) Apply rule action to items
                                  whose storage class matches this value.

    :type matches_suffix: list(str)
    :param matches_suffix: (Optional) Apply rule action to items whose
                           name ends with any of the given suffixes.

    :type number_of_newer_versions: int
    :param number_of_newer_versions: (Optional) Apply rule action to versioned
                                     items having N newer versions.

    :type days_since_custom_time: int
    :param days_since_custom_time: (Optional) Apply rule action to items whose number of days
                                   elapsed since the custom timestamp exceeds this value.
                                   This condition is relevant only for versioned objects. The
                                   value of the field must be a non-negative integer. If it's
                                   zero, the object version will become eligible for lifecycle
                                   action as soon as its custom time is set.

    :type custom_time_before: :class:`datetime.date`
    :param custom_time_before: (Optional) Date object parsed from RFC3339 valid date, apply rule action
                               to items whose custom time is before this date. This condition is relevant
                               only for versioned objects, e.g., 2019-03-16.

    :type days_since_noncurrent_time: int
    :param days_since_noncurrent_time: (Optional) Apply rule action to items whose number of days
                                       elapsed since the noncurrent timestamp exceeds this value.
                                       This condition is relevant only for versioned objects. The
                                       value of the field must be a non-negative integer. If it's
                                       zero, the object version will become eligible for lifecycle
                                       action as soon as it becomes noncurrent.

    :type noncurrent_time_before: :class:`datetime.date`
    :param noncurrent_time_before: (Optional) Date object parsed from RFC3339 valid date, apply
                                   rule action to items whose noncurrent time is before this date.
                                   This condition is relevant only for versioned objects, e.g., 2019-03-16.

    :raises ValueError: if no arguments are passed.
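
    For example, a minimal sketch of building conditions (the field values
    here are illustrative):

    .. code-block:: python

        from google.cloud.storage.bucket import LifecycleRuleConditions

        conditions = LifecycleRuleConditions(age=30, matches_prefix=["logs/"])
        assert dict(conditions) == {"age": 30, "matchesPrefix": ["logs/"]}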
    """

    def __init__(
        self,
        age=None,
        created_before=None,
        is_live=None,
        matches_storage_class=None,
        number_of_newer_versions=None,
        days_since_custom_time=None,
        custom_time_before=None,
        days_since_noncurrent_time=None,
        noncurrent_time_before=None,
        matches_prefix=None,
        matches_suffix=None,
        _factory=False,
    ):
        conditions = {}

        if age is not None:
            conditions["age"] = age

        if created_before is not None:
            conditions["createdBefore"] = created_before.isoformat()

        if is_live is not None:
            conditions["isLive"] = is_live

        if matches_storage_class is not None:
            conditions["matchesStorageClass"] = matches_storage_class

        if number_of_newer_versions is not None:
            conditions["numNewerVersions"] = number_of_newer_versions

        if days_since_custom_time is not None:
            conditions["daysSinceCustomTime"] = days_since_custom_time

        if custom_time_before is not None:
            conditions["customTimeBefore"] = custom_time_before.isoformat()

        if days_since_noncurrent_time is not None:
            conditions["daysSinceNoncurrentTime"] = days_since_noncurrent_time

        if noncurrent_time_before is not None:
            conditions["noncurrentTimeBefore"] = noncurrent_time_before.isoformat()

        if matches_prefix is not None:
            conditions["matchesPrefix"] = matches_prefix

        if matches_suffix is not None:
            conditions["matchesSuffix"] = matches_suffix

        if not _factory and not conditions:
            raise ValueError("Supply at least one condition")

        super(LifecycleRuleConditions, self).__init__(conditions)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleConditions`
        :returns: Instance created from resource.
        """
        instance = cls(_factory=True)
        instance.update(resource)
        return instance

    @property
    def age(self):
        """Condition's age value."""
        return self.get("age")

    @property
    def created_before(self):
        """Condition's created_before value."""
        before = self.get("createdBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)

    @property
    def is_live(self):
        """Condition's 'is_live' value."""
        return self.get("isLive")

    @property
    def matches_prefix(self):
        """Condition's 'matches_prefix' value."""
        return self.get("matchesPrefix")

    @property
    def matches_storage_class(self):
        """Condition's 'matches_storage_class' value."""
        return self.get("matchesStorageClass")

    @property
    def matches_suffix(self):
        """Condition's 'matches_suffix' value."""
        return self.get("matchesSuffix")

    @property
    def number_of_newer_versions(self):
        """Condition's 'number_of_newer_versions' value."""
        return self.get("numNewerVersions")

    @property
    def days_since_custom_time(self):
        """Condition's 'days_since_custom_time' value."""
        return self.get("daysSinceCustomTime")

    @property
    def custom_time_before(self):
        """Condition's 'custom_time_before' value."""
        before = self.get("customTimeBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)

    @property
    def days_since_noncurrent_time(self):
        """Condition's 'days_since_noncurrent_time' value."""
        return self.get("daysSinceNoncurrentTime")

    @property
    def noncurrent_time_before(self):
        """Condition's 'noncurrent_time_before' value."""
        before = self.get("noncurrentTimeBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)


class LifecycleRuleDelete(dict):
    """Map a lifecycle rule deleting matching items.

    :type kw: dict
    :param kw: arguments passed to :class:`LifecycleRuleConditions`.
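
    A minimal sketch (assumes ``bucket`` is an existing :class:`Bucket`; the
    condition shown is illustrative):

    .. code-block:: python

        rule = LifecycleRuleDelete(age=365)
        bucket.lifecycle_rules = [rule]
        bucket.patch()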
    """

    def __init__(self, **kw):
        conditions = LifecycleRuleConditions(**kw)
        rule = {"action": {"type": "Delete"}, "condition": dict(conditions)}
        super().__init__(rule)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleDelete`
        :returns: Instance created from resource.
        """
        instance = cls(_factory=True)
        instance.update(resource)
        return instance


class LifecycleRuleSetStorageClass(dict):
    """Map a lifecycle rule updating storage class of matching items.

    :type storage_class: str, one of :attr:`Bucket.STORAGE_CLASSES`.
    :param storage_class: new storage class to assign to matching items.

    :type kw: dict
    :param kw: arguments passed to :class:`LifecycleRuleConditions`.
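
    A minimal sketch (assumes ``bucket`` is an existing :class:`Bucket`; the
    values shown are illustrative):

    .. code-block:: python

        rule = LifecycleRuleSetStorageClass("COLDLINE", age=90)
        bucket.lifecycle_rules = [rule]
        bucket.patch()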
    """

    def __init__(self, storage_class, **kw):
        conditions = LifecycleRuleConditions(**kw)
        rule = {
            "action": {
                "type": "SetStorageClass",
                "storageClass": storage_class,
            },
            "condition": dict(conditions),
        }
        super().__init__(rule)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleSetStorageClass`
        :returns: Instance created from resource.
        """
        action = resource["action"]
        instance = cls(action["storageClass"], _factory=True)
        instance.update(resource)
        return instance


class LifecycleRuleAbortIncompleteMultipartUpload(dict):
    """Map a rule aborting incomplete multipart uploads of matching items.

    The "age" lifecycle condition is the only supported condition for this rule.

    :type kw: dict
    :param kw: arguments passed to :class:`LifecycleRuleConditions`.
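
    A minimal sketch (assumes ``bucket`` is an existing :class:`Bucket`; the
    age shown is illustrative):

    .. code-block:: python

        rule = LifecycleRuleAbortIncompleteMultipartUpload(age=7)
        bucket.lifecycle_rules = [rule]
        bucket.patch()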
    """

    def __init__(self, **kw):
        conditions = LifecycleRuleConditions(**kw)
        rule = {
            "action": {"type": "AbortIncompleteMultipartUpload"},
            "condition": dict(conditions),
        }
        super().__init__(rule)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleAbortIncompleteMultipartUpload`
        :returns: Instance created from resource.
        """
        instance = cls(_factory=True)
        instance.update(resource)
        return instance


_default = object()


class IAMConfiguration(dict):
    """Map a bucket's IAM configuration.

    :type bucket: :class:`Bucket`
    :param bucket: Bucket for which this instance is the policy.

    :type public_access_prevention: str
    :param public_access_prevention:
        (Optional) Whether the public access prevention policy is 'inherited' (default) or 'enforced'.
        See: https://cloud.google.com/storage/docs/public-access-prevention

    :type uniform_bucket_level_access_enabled: bool
    :param uniform_bucket_level_access_enabled:
        (Optional) Whether the IAM-only policy is enabled for the bucket.

    :type uniform_bucket_level_access_locked_time: :class:`datetime.datetime`
    :param uniform_bucket_level_access_locked_time:
        (Optional) When the bucket's IAM-only policy was enabled.
        This value should normally only be set by the back-end API.

    :type bucket_policy_only_enabled: bool
    :param bucket_policy_only_enabled:
        Deprecated alias for :data:`uniform_bucket_level_access_enabled`.

    :type bucket_policy_only_locked_time: :class:`datetime.datetime`
    :param bucket_policy_only_locked_time:
        Deprecated alias for :data:`uniform_bucket_level_access_locked_time`.
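
    A typical way to enable uniform bucket-level access (a sketch, assuming
    ``bucket`` is an existing :class:`Bucket`):

    .. code-block:: python

        bucket.iam_configuration.uniform_bucket_level_access_enabled = True
        bucket.patch()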
    """

    def __init__(
        self,
        bucket,
        public_access_prevention=_default,
        uniform_bucket_level_access_enabled=_default,
        uniform_bucket_level_access_locked_time=_default,
        bucket_policy_only_enabled=_default,
        bucket_policy_only_locked_time=_default,
    ):
        if bucket_policy_only_enabled is not _default:
            if uniform_bucket_level_access_enabled is not _default:
                raise ValueError(_UBLA_BPO_ENABLED_MESSAGE)

            warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2)
            uniform_bucket_level_access_enabled = bucket_policy_only_enabled

        if bucket_policy_only_locked_time is not _default:
            if uniform_bucket_level_access_locked_time is not _default:
                raise ValueError(_UBLA_BPO_LOCK_TIME_MESSAGE)

            warnings.warn(_BPO_LOCK_TIME_MESSAGE, DeprecationWarning, stacklevel=2)
            uniform_bucket_level_access_locked_time = bucket_policy_only_locked_time

        if uniform_bucket_level_access_enabled is _default:
            uniform_bucket_level_access_enabled = False

        if public_access_prevention is _default:
            public_access_prevention = PUBLIC_ACCESS_PREVENTION_INHERITED

        data = {
            "uniformBucketLevelAccess": {
                "enabled": uniform_bucket_level_access_enabled
            },
            "publicAccessPrevention": public_access_prevention,
        }
        if uniform_bucket_level_access_locked_time is not _default:
            data["uniformBucketLevelAccess"]["lockedTime"] = _datetime_to_rfc3339(
                uniform_bucket_level_access_locked_time
            )
        super(IAMConfiguration, self).__init__(data)
        self._bucket = bucket

    @classmethod
    def from_api_repr(cls, resource, bucket):
        """Factory: construct instance from resource.

        :type bucket: :class:`Bucket`
        :param bucket: Bucket for which this instance is the policy.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`IAMConfiguration`
        :returns: Instance created from resource.
        """
        instance = cls(bucket)
        instance.update(resource)
        return instance

    @property
    def bucket(self):
        """Bucket for which this instance is the policy.

        :rtype: :class:`Bucket`
        :returns: the instance's bucket.
        """
        return self._bucket

    @property
    def public_access_prevention(self):
        """Setting for public access prevention policy. Options are 'inherited' (default) or 'enforced'.

        See: https://cloud.google.com/storage/docs/public-access-prevention

        :rtype: string
        :returns: the public access prevention status, either 'enforced' or 'inherited'.
        """
        return self["publicAccessPrevention"]

    @public_access_prevention.setter
    def public_access_prevention(self, value):
        self["publicAccessPrevention"] = value
        self.bucket._patch_property("iamConfiguration", self)

    @property
    def uniform_bucket_level_access_enabled(self):
        """If set, access checks only use bucket-level IAM policies or above.

        :rtype: bool
        :returns: whether the bucket is configured to allow only IAM.
        """
        ubla = self.get("uniformBucketLevelAccess", {})
        return ubla.get("enabled", False)

    @uniform_bucket_level_access_enabled.setter
    def uniform_bucket_level_access_enabled(self, value):
        ubla = self.setdefault("uniformBucketLevelAccess", {})
        ubla["enabled"] = bool(value)
        self.bucket._patch_property("iamConfiguration", self)

    @property
    def uniform_bucket_level_access_locked_time(self):
        """Deadline for changing :attr:`uniform_bucket_level_access_enabled` from true to false.

        If the bucket's :attr:`uniform_bucket_level_access_enabled` is true, this property
        is the time after which that setting becomes immutable.

        If the bucket's :attr:`uniform_bucket_level_access_enabled` is false, this property
        is ``None``.

        :rtype: Union[:class:`datetime.datetime`, None]
        :returns: (readonly) Time after which :attr:`uniform_bucket_level_access_enabled` will
                  be frozen as true.
        """
        ubla = self.get("uniformBucketLevelAccess", {})
        stamp = ubla.get("lockedTime")
        if stamp is not None:
            stamp = _rfc3339_nanos_to_datetime(stamp)
        return stamp

    @property
    def bucket_policy_only_enabled(self):
        """Deprecated alias for :attr:`uniform_bucket_level_access_enabled`.

        :rtype: bool
        :returns: whether the bucket is configured to allow only IAM.
        """
        return self.uniform_bucket_level_access_enabled

    @bucket_policy_only_enabled.setter
    def bucket_policy_only_enabled(self, value):
        warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2)
        self.uniform_bucket_level_access_enabled = value

    @property
    def bucket_policy_only_locked_time(self):
        """Deprecated alias for :attr:`uniform_bucket_level_access_locked_time`.

        :rtype: Union[:class:`datetime.datetime`, None]
        :returns:
            (readonly) Time after which :attr:`bucket_policy_only_enabled` will
            be frozen as true.
        """
        return self.uniform_bucket_level_access_locked_time


class Bucket(_PropertyMixin):
    """A class representing a Bucket on Cloud Storage.

    :type client: :class:`google.cloud.storage.client.Client`
    :param client: A client which holds credentials and project configuration
                   for the bucket (which requires a project).

    :type name: str
    :param name: The name of the bucket. Bucket names must start and end with a
                 number or letter.

    :type user_project: str
    :param user_project: (Optional) the project ID to be billed for API
                         requests made via this instance.

    :type generation: int
    :param generation: (Optional) If present, selects a specific revision of
                       this bucket.
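
    A typical way to obtain a bucket handle (a sketch; the bucket name is
    illustrative):

    .. code-block:: python

        from google.cloud import storage

        client = storage.Client()
        bucket = client.bucket("my-bucket")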
    """

    _MAX_OBJECTS_FOR_ITERATION = 256
    """Maximum number of existing objects allowed in iteration.

    This is used in Bucket.delete() and Bucket.make_public().
    """

    STORAGE_CLASSES = (
        STANDARD_STORAGE_CLASS,
        NEARLINE_STORAGE_CLASS,
        COLDLINE_STORAGE_CLASS,
        ARCHIVE_STORAGE_CLASS,
        MULTI_REGIONAL_LEGACY_STORAGE_CLASS,  # legacy
        REGIONAL_LEGACY_STORAGE_CLASS,  # legacy
        DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS,  # legacy
    )
    """Allowed values for :attr:`storage_class`.

    Default value is :attr:`STANDARD_STORAGE_CLASS`.

    See
    https://cloud.google.com/storage/docs/json_api/v1/buckets#storageClass
    https://cloud.google.com/storage/docs/storage-classes
    """

    _LOCATION_TYPES = (
        MULTI_REGION_LOCATION_TYPE,
        REGION_LOCATION_TYPE,
        DUAL_REGION_LOCATION_TYPE,
    )
    """Allowed values for :attr:`location_type`."""

    def __init__(self, client, name=None, user_project=None, generation=None):
        """
        property :attr:`name`
            Get the bucket's name.
        """
        name = _validate_name(name)
        super(Bucket, self).__init__(name=name)
        self._client = client
        self._acl = BucketACL(self)
        self._default_object_acl = DefaultObjectACL(self)
        self._label_removals = set()
        self._user_project = user_project

        if generation is not None:
            self._properties["generation"] = generation

    def __repr__(self):
        return f"<Bucket: {self.name}>"

    @property
    def client(self):
        """The client bound to this bucket."""
        return self._client

    def _set_properties(self, value):
        """Set the properties for the current object.

        :type value: dict or :class:`google.cloud.storage.batch._FutureDict`
        :param value: The properties to be set.
        """
        self._label_removals.clear()
        return super(Bucket, self)._set_properties(value)

    @property
    def rpo(self):
        """Get the RPO (Recovery Point Objective) of this bucket.

        See: https://cloud.google.com/storage/docs/managing-turbo-replication

        :rtype: str
        :returns: "ASYNC_TURBO" or "DEFAULT"
        """
        return self._properties.get("rpo")

    @rpo.setter
    def rpo(self, value):
        """
        Set the RPO (Recovery Point Objective) of this bucket.

        See: https://cloud.google.com/storage/docs/managing-turbo-replication

        :type value: str
        :param value: "ASYNC_TURBO" or "DEFAULT"
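
        A sketch of enabling turbo replication (assumes ``bucket`` is an
        existing :class:`Bucket`):

        .. code-block:: python

            from google.cloud.storage.constants import RPO_ASYNC_TURBO

            bucket.rpo = RPO_ASYNC_TURBO
            bucket.patch()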
        """
        self._patch_property("rpo", value)

    @property
    def user_project(self):
        """Project ID to be billed for API requests made via this bucket.

        If unset, API requests are billed to the bucket owner.

        A user project is required for all operations on Requester Pays buckets.

        See https://cloud.google.com/storage/docs/requester-pays#requirements for details.

        :rtype: str
        """
        return self._user_project

    @property
    def generation(self):
        """Retrieve the generation for the bucket.

        :rtype: int or ``NoneType``
        :returns: The generation of the bucket or ``None`` if the bucket's
                  resource has not been loaded from the server.
        """
        generation = self._properties.get("generation")
        if generation is not None:
            return int(generation)

    @property
    def soft_delete_time(self):
        """If this bucket has been soft-deleted, returns the time at which it became soft-deleted.

        :rtype: :class:`datetime.datetime` or ``NoneType``
        :returns:
            (readonly) The time that the bucket became soft-deleted.
            Note this property is only set for soft-deleted buckets.
        """
        soft_delete_time = self._properties.get("softDeleteTime")
        if soft_delete_time is not None:
            return _rfc3339_nanos_to_datetime(soft_delete_time)

    @property
    def hard_delete_time(self):
        """If this bucket has been soft-deleted, returns the time at which it will be permanently deleted.

        :rtype: :class:`datetime.datetime` or ``NoneType``
        :returns:
            (readonly) The time that the bucket will be permanently deleted.
            Note this property is only set for soft-deleted buckets.
        """
        hard_delete_time = self._properties.get("hardDeleteTime")
        if hard_delete_time is not None:
            return _rfc3339_nanos_to_datetime(hard_delete_time)

    @property
    def _query_params(self):
        """Default query parameters."""
        params = super()._query_params
        return params

    @classmethod
    def from_uri(cls, uri, client=None):
        """Construct a bucket instance from a gs:// URI.

        .. code-block:: python

            from google.cloud import storage
            from google.cloud.storage.bucket import Bucket
            client = storage.Client()
            bucket = Bucket.from_uri("gs://bucket", client=client)

        :type uri: str
        :param uri: The bucket URI to parse, e.g. ``gs://bucket``.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. Application code should
                       *always* pass ``client``.

        :rtype: :class:`google.cloud.storage.bucket.Bucket`
        :returns: The bucket object created.
        """
        scheme, netloc, path, query, frag = urlsplit(uri)

        if scheme != "gs":
            raise ValueError("URI scheme must be gs")

        return cls(client, name=netloc)

    @classmethod
    def from_string(cls, uri, client=None):
        """Construct a bucket instance from a gs:// URI.

        .. note::
            Deprecated alias for :meth:`from_uri`.

        .. code-block:: python

            from google.cloud import storage
            from google.cloud.storage.bucket import Bucket
            client = storage.Client()
            bucket = Bucket.from_string("gs://bucket", client=client)

        :type uri: str
        :param uri: The bucket URI to parse, e.g. ``gs://bucket``.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. Application code should
                       *always* pass ``client``.

        :rtype: :class:`google.cloud.storage.bucket.Bucket`
        :returns: The bucket object created.
        """
        warnings.warn(_FROM_STRING_MESSAGE, PendingDeprecationWarning, stacklevel=2)
        return Bucket.from_uri(uri=uri, client=client)

    def blob(
        self,
        blob_name,
        chunk_size=None,
        encryption_key=None,
        kms_key_name=None,
        generation=None,
        crc32c_checksum=None,
    ):
        """Factory constructor for blob object.

        .. note::
            This will not make an HTTP request; it simply instantiates
            a blob object owned by this bucket.

        :type blob_name: str
        :param blob_name: The name of the blob to be instantiated.

        :type chunk_size: int
        :param chunk_size: The size of a chunk of data whenever iterating
                           (in bytes). This must be a multiple of 256 KB per
                           the API specification.

        :type encryption_key: bytes
        :param encryption_key:
            (Optional) 32 byte encryption key for customer-supplied encryption.

        :type kms_key_name: str
        :param kms_key_name:
            (Optional) Resource name of KMS key used to encrypt blob's content.

        :type generation: long
        :param generation: (Optional) If present, selects a specific revision of
                           this object.

        :type crc32c_checksum: str
        :param crc32c_checksum:
            (Optional) If set, the CRC32C checksum of the blob's content,
            as described in RFC 4960, Appendix B, encoded using base64 in
            big-endian byte order. See
            Appendix B: https://datatracker.ietf.org/doc/html/rfc4960#appendix-B
            base64: https://datatracker.ietf.org/doc/html/rfc4648#section-4

        :rtype: :class:`google.cloud.storage.blob.Blob`
        :returns: The blob object created.
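
        For example (a sketch; the blob name and contents are illustrative):

        .. code-block:: python

            blob = bucket.blob("path/to/file.txt")
            blob.upload_from_string("hello")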
        """
        return Blob(
            name=blob_name,
            bucket=self,
            chunk_size=chunk_size,
            encryption_key=encryption_key,
            kms_key_name=kms_key_name,
            generation=generation,
            crc32c_checksum=crc32c_checksum,
        )

    def notification(
        self,
        topic_name=None,
        topic_project=None,
        custom_attributes=None,
        event_types=None,
        blob_name_prefix=None,
        payload_format=NONE_PAYLOAD_FORMAT,
        notification_id=None,
    ):
        """Factory: create a notification resource for the bucket.

        See: :class:`.BucketNotification` for parameters.

        :rtype: :class:`.BucketNotification`
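
        A sketch of creating a notification (``my-topic`` is an illustrative
        Pub/Sub topic name):

        .. code-block:: python

            notification = bucket.notification(topic_name="my-topic")
            notification.create()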
        """
        return BucketNotification(
            self,
            topic_name=topic_name,
            topic_project=topic_project,
            custom_attributes=custom_attributes,
            event_types=event_types,
            blob_name_prefix=blob_name_prefix,
            payload_format=payload_format,
            notification_id=notification_id,
        )

    def exists(
        self,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_etag_match=None,
        if_etag_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY,
    ):
        """Determines whether or not this bucket exists.

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_etag_match: Union[str, Set[str]]
        :param if_etag_match: (Optional) Make the operation conditional on whether the
                              bucket's current ETag matches the given value.

        :type if_etag_not_match: Union[str, Set[str]]
        :param if_etag_not_match: (Optional) Make the operation conditional on whether the
                                  bucket's current ETag does not match the given value.

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :rtype: bool
        :returns: True if the bucket exists in Cloud Storage.
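
        For example (a sketch):

        .. code-block:: python

            if bucket.exists():
                print("bucket is present")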
        """
        with create_trace_span(name="Storage.Bucket.exists"):
            client = self._require_client(client)
            # We only need the status code (200 or not) so we seek to
            # minimize the returned payload.
            query_params = {"fields": "name"}

            if self.user_project is not None:
                query_params["userProject"] = self.user_project

            _add_generation_match_parameters(
                query_params,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
            )

            headers = {}
            _add_etag_match_headers(
                headers,
                if_etag_match=if_etag_match,
                if_etag_not_match=if_etag_not_match,
            )

            try:
                # We intentionally pass `_target_object=None` since fields=name
                # would limit the local properties.
                client._get_resource(
                    self.path,
                    query_params=query_params,
                    headers=headers,
                    timeout=timeout,
                    retry=retry,
                    _target_object=None,
                )
            except NotFound:
                # NOTE: This will not fail immediately in a batch. However, when
                # Batch.finish() is called, the resulting `NotFound` will be
                # raised.
                return False
            return True

    def create(
        self,
        client=None,
        project=None,
        location=None,
        predefined_acl=None,
        predefined_default_object_acl=None,
        enable_object_retention=False,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
    ):
        """Creates current bucket.

        If the bucket already exists, will raise
        :class:`google.cloud.exceptions.Conflict`.

        This implements "storage.buckets.insert".

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type project: str
        :param project: (Optional) The project under which the bucket is to
                        be created. If not passed, uses the project set on
                        the client.
        :raises ValueError: if ``project`` is None and client's
                            :attr:`project` is also None.

        :type location: str
        :param location: (Optional) The location of the bucket. If not passed,
                         the default location, US, will be used. See
                         https://cloud.google.com/storage/docs/bucket-locations

        :type predefined_acl: str
        :param predefined_acl:
            (Optional) Name of predefined ACL to apply to bucket. See:
            https://cloud.google.com/storage/docs/access-control/lists#predefined-acl

        :type predefined_default_object_acl: str
        :param predefined_default_object_acl:
            (Optional) Name of predefined ACL to apply to bucket's objects. See:
            https://cloud.google.com/storage/docs/access-control/lists#predefined-acl

        :type enable_object_retention: bool
        :param enable_object_retention:
            (Optional) Whether object retention should be enabled on this bucket. See:
            https://cloud.google.com/storage/docs/object-lock

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
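
        For example (a sketch; assumes ``client`` is a ``storage.Client``, and
        the name and location shown are illustrative):

        .. code-block:: python

            bucket = client.bucket("my-new-bucket")
            bucket.create(location="US-EAST1")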
        """
        with create_trace_span(name="Storage.Bucket.create"):
            client = self._require_client(client)
            client.create_bucket(
                bucket_or_name=self,
                project=project,
                user_project=self.user_project,
                location=location,
                predefined_acl=predefined_acl,
                predefined_default_object_acl=predefined_default_object_acl,
                enable_object_retention=enable_object_retention,
                timeout=timeout,
                retry=retry,
            )

    def update(
        self,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
    ):
        """Sends all properties in a PUT request.

        Updates the ``_properties`` with the response from the backend.

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current object.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
        """
        with create_trace_span(name="Storage.Bucket.update"):
            super(Bucket, self).update(
                client=client,
                timeout=timeout,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                retry=retry,
            )

    def reload(
        self,
        client=None,
        projection="noAcl",
        timeout=_DEFAULT_TIMEOUT,
        if_etag_match=None,
        if_etag_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY,
        soft_deleted=None,
    ):
        """Reload properties from Cloud Storage.

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current object.

        :type projection: str
        :param projection: (Optional) If used, must be 'full' or 'noAcl'.
                           Defaults to ``'noAcl'``. Specifies the set of
                           properties to return.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_etag_match: Union[str, Set[str]]
        :param if_etag_match: (Optional) Make the operation conditional on whether the
                              bucket's current ETag matches the given value.

        :type if_etag_not_match: Union[str, Set[str]]
        :param if_etag_not_match: (Optional) Make the operation conditional on whether the
                                  bucket's current ETag does not match the given value.

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :type soft_deleted: bool
        :param soft_deleted: (Optional) If True, looks for a soft-deleted
            bucket. Will only return the bucket metadata if the bucket exists
            and is in a soft-deleted state. The bucket ``generation`` must be
            set if ``soft_deleted`` is set to True.
            See: https://cloud.google.com/storage/docs/soft-delete
        """
        with create_trace_span(name="Storage.Bucket.reload"):
            super(Bucket, self).reload(
                client=client,
                projection=projection,
                timeout=timeout,
                if_etag_match=if_etag_match,
                if_etag_not_match=if_etag_not_match,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                retry=retry,
                soft_deleted=soft_deleted,
            )

    def patch(
        self,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
    ):
        """Sends all changed properties in a PATCH request.

        Updates the ``_properties`` with the response from the backend.

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current object.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
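
        A common pattern (a sketch; the label values are illustrative):

        .. code-block:: python

            bucket.labels = {"env": "prod"}
            bucket.patch()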
        """
        with create_trace_span(name="Storage.Bucket.patch"):
            # Special case: For buckets, it is possible that labels are being
            # removed; this requires special handling.
            if self._label_removals:
                self._changes.add("labels")
                self._properties.setdefault("labels", {})
                for removed_label in self._label_removals:
                    self._properties["labels"][removed_label] = None

            # Call the superclass method.
            super(Bucket, self).patch(
                client=client,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                timeout=timeout,
                retry=retry,
            )

    @property
    def acl(self):
        """Create our ACL on demand."""
        return self._acl

    @property
    def default_object_acl(self):
        """Create our defaultObjectACL on demand."""
        return self._default_object_acl

    @staticmethod
    def path_helper(bucket_name):
        """Relative URL path for a bucket.

        :type bucket_name: str
        :param bucket_name: The bucket name in the path.

        :rtype: str
        :returns: The relative URL path for ``bucket_name``.
        """
        return "/b/" + bucket_name

    @property
    def path(self):
        """The URL path to this bucket."""
        if not self.name:
            raise ValueError("Cannot determine path without bucket name.")

        return self.path_helper(self.name)

    def get_blob(
        self,
        blob_name,
        client=None,
        encryption_key=None,
        generation=None,
        if_etag_match=None,
        if_etag_not_match=None,
        if_generation_match=None,
        if_generation_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
        soft_deleted=None,
        **kwargs,
    ):
        """Get a blob object by name.

        See a [code sample](https://cloud.google.com/storage/docs/samples/storage-get-metadata#storage_get_metadata-python)
        on how to retrieve metadata of an object.

        If :attr:`user_project` is set, bills the API request to that project.

        :type blob_name: str
        :param blob_name: The name of the blob to retrieve.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type encryption_key: bytes
        :param encryption_key:
            (Optional) 32 byte encryption key for customer-supplied encryption.
            See
            https://cloud.google.com/storage/docs/encryption#customer-supplied.

        :type generation: long
        :param generation:
            (Optional) If present, selects a specific revision of this object.

        :type if_etag_match: Union[str, Set[str]]
        :param if_etag_match:
            (Optional) See :ref:`using-if-etag-match`

        :type if_etag_not_match: Union[str, Set[str]]
        :param if_etag_not_match:
            (Optional) See :ref:`using-if-etag-not-match`

        :type if_generation_match: long
        :param if_generation_match:
            (Optional) See :ref:`using-if-generation-match`

        :type if_generation_not_match: long
        :param if_generation_not_match:
            (Optional) See :ref:`using-if-generation-not-match`

        :type if_metageneration_match: long
        :param if_metageneration_match:
            (Optional) See :ref:`using-if-metageneration-match`

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match:
            (Optional) See :ref:`using-if-metageneration-not-match`

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :type soft_deleted: bool
        :param soft_deleted:
            (Optional) If True, looks for a soft-deleted object. Will only return
            the object metadata if the object exists and is in a soft-deleted state.
            Object ``generation`` is required if ``soft_deleted`` is set to True.
            See: https://cloud.google.com/storage/docs/soft-delete

        :param kwargs: Keyword arguments to pass to the
                       :class:`~google.cloud.storage.blob.Blob` constructor.

        :rtype: :class:`google.cloud.storage.blob.Blob` or None
        :returns: The blob object if it exists, otherwise None.
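
        For example (a sketch; the blob name is illustrative):

        .. code-block:: python

            blob = bucket.get_blob("path/to/file.txt")
            if blob is not None:
                print(blob.size)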
        """
        with create_trace_span(name="Storage.Bucket.getBlob"):
            blob = Blob(
                bucket=self,
                name=blob_name,
                encryption_key=encryption_key,
                generation=generation,
                **kwargs,
            )
            try:
                # NOTE: This will not fail immediately in a batch. However, when
                # Batch.finish() is called, the resulting `NotFound` will be
                # raised.
                blob.reload(
                    client=client,
                    timeout=timeout,
                    if_etag_match=if_etag_match,
                    if_etag_not_match=if_etag_not_match,
                    if_generation_match=if_generation_match,
                    if_generation_not_match=if_generation_not_match,
                    if_metageneration_match=if_metageneration_match,
                    if_metageneration_not_match=if_metageneration_not_match,
                    retry=retry,
                    soft_deleted=soft_deleted,
                )
            except NotFound:
                return None
            else:
                return blob

    def list_blobs(
        self,
        max_results=None,
        page_token=None,
        prefix=None,
        delimiter=None,
        start_offset=None,
        end_offset=None,
        include_trailing_delimiter=None,
        versions=None,
        projection="noAcl",
        fields=None,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
        match_glob=None,
        include_folders_as_prefixes=None,
        soft_deleted=None,
        page_size=None,
    ):
        """Return an iterator used to find blobs in the bucket.

        If :attr:`user_project` is set, bills the API request to that project.

        :type max_results: int
        :param max_results:
            (Optional) The maximum number of blobs to return.

        :type page_token: str
        :param page_token:
            (Optional) If present, return the next batch of blobs, using the
            value, which must correspond to the ``nextPageToken`` value
            returned in the previous response. Deprecated: use the ``pages``
            property of the returned iterator instead of manually passing the
            token.

        :type prefix: str
        :param prefix: (Optional) Prefix used to filter blobs.

        :type delimiter: str
        :param delimiter: (Optional) Delimiter, used with ``prefix`` to
                          emulate hierarchy.

        :type start_offset: str
        :param start_offset:
            (Optional) Filter results to objects whose names are
            lexicographically equal to or after ``startOffset``. If
            ``endOffset`` is also set, the objects listed will have names
            between ``startOffset`` (inclusive) and ``endOffset`` (exclusive).

        :type end_offset: str
        :param end_offset:
            (Optional) Filter results to objects whose names are
            lexicographically before ``endOffset``. If ``startOffset`` is also
            set, the objects listed will have names between ``startOffset``
            (inclusive) and ``endOffset`` (exclusive).

        :type include_trailing_delimiter: boolean
        :param include_trailing_delimiter:
            (Optional) If true, objects that end in exactly one instance of
            ``delimiter`` will have their metadata included in ``items`` in
            addition to ``prefixes``.

        :type versions: bool
        :param versions: (Optional) Whether object versions should be returned
                         as separate blobs.

        :type projection: str
        :param projection: (Optional) If used, must be 'full' or 'noAcl'.
                           Defaults to ``'noAcl'``. Specifies the set of
                           properties to return.

        :type fields: str
        :param fields:
            (Optional) Selector specifying which fields to include
            in a partial response. Must be a list of fields. For
            example to get a partial response with just the next
            page token and the name and language of each blob returned:
            ``'items(name,contentLanguage),nextPageToken'``.
            See: https://cloud.google.com/storage/docs/json_api/v1/parameters#fields

        :type client: :class:`~google.cloud.storage.client.Client`
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :type match_glob: str
        :param match_glob:
            (Optional) A glob pattern used to filter results (for example, foo*bar).
            The string value must be UTF-8 encoded. See:
            https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-object-glob

        :type include_folders_as_prefixes: bool
        :param include_folders_as_prefixes:
            (Optional) If true, includes Folders and Managed Folders in the set of
            ``prefixes`` returned by the query. Only applicable if ``delimiter`` is set to /.
            See: https://cloud.google.com/storage/docs/managed-folders

        :type soft_deleted: bool
        :param soft_deleted:
            (Optional) If true, only soft-deleted objects will be listed as distinct results in order of increasing
            generation number. This parameter can only be used successfully if the bucket has a soft delete policy.
            Note ``soft_deleted`` and ``versions`` cannot be set to True simultaneously. See:
            https://cloud.google.com/storage/docs/soft-delete

        :type page_size: int
        :param page_size:
            (Optional) Maximum number of blobs to return in each page.
            Defaults to a value set by the API.

        :rtype: :class:`~google.api_core.page_iterator.Iterator`
        :returns: Iterator of all :class:`~google.cloud.storage.blob.Blob`
                  in this bucket matching the arguments.
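
        For example (a sketch; the prefix is illustrative):

        .. code-block:: python

            for blob in bucket.list_blobs(prefix="logs/"):
                print(blob.name)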
        """
        with create_trace_span(name="Storage.Bucket.listBlobs"):
            client = self._require_client(client)
            return client.list_blobs(
                self,
                max_results=max_results,
                page_token=page_token,
                prefix=prefix,
                delimiter=delimiter,
                start_offset=start_offset,
                end_offset=end_offset,
                include_trailing_delimiter=include_trailing_delimiter,
                versions=versions,
                projection=projection,
                fields=fields,
                page_size=page_size,
                timeout=timeout,
                retry=retry,
                match_glob=match_glob,
                include_folders_as_prefixes=include_folders_as_prefixes,
                soft_deleted=soft_deleted,
            )

    def list_notifications(
        self, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY
    ):
        """List Pub / Sub notifications for this bucket.

        See:
        https://cloud.google.com/storage/docs/json_api/v1/notifications/list

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :rtype: list of :class:`.BucketNotification`
        :returns: notification instances
        """
        with create_trace_span(name="Storage.Bucket.listNotifications"):
            client = self._require_client(client)
            path = self.path + "/notificationConfigs"
            iterator = client._list_resource(
                path,
                _item_to_notification,
                timeout=timeout,
                retry=retry,
            )
            iterator.bucket = self
            return iterator

    def get_notification(
        self,
        notification_id,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
    ):
        """Get Pub / Sub notification for this bucket.

        See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/notifications/get)
        and a [code sample](https://cloud.google.com/storage/docs/samples/storage-print-pubsub-bucket-notification#storage_print_pubsub_bucket_notification-python).

        If :attr:`user_project` is set, bills the API request to that project.

        :type notification_id: str
        :param notification_id: The notification id to retrieve the notification configuration.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :rtype: :class:`.BucketNotification`
        :returns: notification instance.
        """
        with create_trace_span(name="Storage.Bucket.getNotification"):
            notification = self.notification(notification_id=notification_id)
            notification.reload(client=client, timeout=timeout, retry=retry)
            return notification

1626 def delete(
1627 self,
1628 force=False,
1629 client=None,
1630 if_metageneration_match=None,
1631 if_metageneration_not_match=None,
1632 timeout=_DEFAULT_TIMEOUT,
1633 retry=DEFAULT_RETRY,
1634 ):
1635 """Delete this bucket.
1636
1637 The bucket **must** be empty in order to submit a delete request. If
1638 ``force=True`` is passed, this will first attempt to delete all the
1639 objects / blobs in the bucket (i.e. try to empty the bucket).
1640
1641 If the bucket doesn't exist, this will raise
1642 :class:`google.cloud.exceptions.NotFound`. If the bucket is not empty
1643 (and ``force=False``), will raise :class:`google.cloud.exceptions.Conflict`.
1644
If ``force=True`` and the bucket contains more than 256 objects / blobs,
this will refuse to delete the objects (or the bucket). This is to
prevent accidental bucket deletion and to prevent extremely long runtime
of this method. Also note that ``force=True`` is not supported in a
``Batch`` context.
1650
1651 If :attr:`user_project` is set, bills the API request to that project.
1652
1653 :type force: bool
1654 :param force: If True, empties the bucket's objects then deletes it.
1655
1656 :type client: :class:`~google.cloud.storage.client.Client` or
1657 ``NoneType``
1658 :param client: (Optional) The client to use. If not passed, falls back
1659 to the ``client`` stored on the current bucket.
1660
:type if_metageneration_match: long
:param if_metageneration_match: (Optional) Make the operation conditional on whether the
bucket's current metageneration matches the given value.

:type if_metageneration_not_match: long
:param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
bucket's current metageneration does not match the given value.
1668
1669 :type timeout: float or tuple
1670 :param timeout:
1671 (Optional) The amount of time, in seconds, to wait
1672 for the server response. See: :ref:`configuring_timeouts`
1673
1674 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1675 :param retry:
1676 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1677
1678 :raises: :class:`ValueError` if ``force`` is ``True`` and the bucket
1679 contains more than 256 objects / blobs.
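
Example (illustrative; the bucket name is hypothetical and an
authenticated client is assumed):

>>> from google.cloud import storage
>>> client = storage.Client()
>>> bucket = client.get_bucket("my-bucket")
>>> bucket.delete(force=True)  # empties the bucket first, then deletes it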
1680 """
1681 with create_trace_span(name="Storage.Bucket.delete"):
1682 client = self._require_client(client)
1683 query_params = {}
1684
1685 if self.user_project is not None:
1686 query_params["userProject"] = self.user_project
1687
1688 _add_generation_match_parameters(
1689 query_params,
1690 if_metageneration_match=if_metageneration_match,
1691 if_metageneration_not_match=if_metageneration_not_match,
1692 )
1693 if force:
1694 blobs = list(
1695 self.list_blobs(
1696 max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
1697 client=client,
1698 timeout=timeout,
1699 retry=retry,
1700 versions=True,
1701 )
1702 )
1703 if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
1704 message = (
1705 "Refusing to delete bucket with more than "
1706 "%d objects. If you actually want to delete "
1707 "this bucket, please delete the objects "
1708 "yourself before calling Bucket.delete()."
1709 ) % (self._MAX_OBJECTS_FOR_ITERATION,)
1710 raise ValueError(message)
1711
1712 # Ignore 404 errors on delete.
1713 self.delete_blobs(
1714 blobs,
1715 on_error=lambda blob: None,
1716 client=client,
1717 timeout=timeout,
1718 retry=retry,
1719 preserve_generation=True,
1720 )
1721
1722 # We intentionally pass `_target_object=None` since a DELETE
1723 # request has no response value (whether in a standard request or
1724 # in a batch request).
1725 client._delete_resource(
1726 self.path,
1727 query_params=query_params,
1728 timeout=timeout,
1729 retry=retry,
1730 _target_object=None,
1731 )
1732
1733 def delete_blob(
1734 self,
1735 blob_name,
1736 client=None,
1737 generation=None,
1738 if_generation_match=None,
1739 if_generation_not_match=None,
1740 if_metageneration_match=None,
1741 if_metageneration_not_match=None,
1742 timeout=_DEFAULT_TIMEOUT,
1743 retry=DEFAULT_RETRY,
1744 ):
1745 """Deletes a blob from the current bucket.
1746
1747 If :attr:`user_project` is set, bills the API request to that project.
1748
1749 :type blob_name: str
1750 :param blob_name: A blob name to delete.
1751
1752 :type client: :class:`~google.cloud.storage.client.Client` or
1753 ``NoneType``
1754 :param client: (Optional) The client to use. If not passed, falls back
1755 to the ``client`` stored on the current bucket.
1756
1757 :type generation: long
1758 :param generation: (Optional) If present, permanently deletes a specific
1759 revision of this object.
1760
1761 :type if_generation_match: long
1762 :param if_generation_match:
1763 (Optional) See :ref:`using-if-generation-match`
1764
1765 :type if_generation_not_match: long
1766 :param if_generation_not_match:
1767 (Optional) See :ref:`using-if-generation-not-match`
1768
1769 :type if_metageneration_match: long
1770 :param if_metageneration_match:
1771 (Optional) See :ref:`using-if-metageneration-match`
1772
1773 :type if_metageneration_not_match: long
1774 :param if_metageneration_not_match:
1775 (Optional) See :ref:`using-if-metageneration-not-match`
1776
1777 :type timeout: float or tuple
1778 :param timeout:
1779 (Optional) The amount of time, in seconds, to wait
1780 for the server response. See: :ref:`configuring_timeouts`
1781
1782 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1783 :param retry: (Optional) How to retry the RPC. A None value will disable
1784 retries. A google.api_core.retry.Retry value will enable retries,
1785 and the object will define retriable response codes and errors and
1786 configure backoff and timeout options.
1787
1788 A google.cloud.storage.retry.ConditionalRetryPolicy value wraps a
1789 Retry object and activates it only if certain conditions are met.
1790 This class exists to provide safe defaults for RPC calls that are
1791 not technically safe to retry normally (due to potential data
1792 duplication or other side-effects) but become safe to retry if a
1793 condition such as if_generation_match is set.
1794
1795 See the retry.py source code and docstrings in this package
1796 (google.cloud.storage.retry) for information on retry types and how
1797 to configure them.
1798
:raises: :class:`google.cloud.exceptions.NotFound` if the blob
isn't found. To suppress the exception, use :meth:`delete_blobs`
by passing a no-op ``on_error`` callback.
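
Example (illustrative; the bucket and blob names are hypothetical):

>>> from google.cloud import storage
>>> from google.cloud.exceptions import NotFound
>>> client = storage.Client()
>>> bucket = client.get_bucket("my-bucket")
>>> try:
...     bucket.delete_blob("my-file.txt")
... except NotFound:
...     pass  # the blob was already gone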
1803 """
1804 with create_trace_span(name="Storage.Bucket.deleteBlob"):
1805 client = self._require_client(client)
1806 blob = Blob(blob_name, bucket=self, generation=generation)
1807
1808 query_params = copy.deepcopy(blob._query_params)
1809 _add_generation_match_parameters(
1810 query_params,
1811 if_generation_match=if_generation_match,
1812 if_generation_not_match=if_generation_not_match,
1813 if_metageneration_match=if_metageneration_match,
1814 if_metageneration_not_match=if_metageneration_not_match,
1815 )
1816 # We intentionally pass `_target_object=None` since a DELETE
1817 # request has no response value (whether in a standard request or
1818 # in a batch request).
1819 client._delete_resource(
1820 blob.path,
1821 query_params=query_params,
1822 timeout=timeout,
1823 retry=retry,
1824 _target_object=None,
1825 )
1826
1827 def delete_blobs(
1828 self,
1829 blobs,
1830 on_error=None,
1831 client=None,
1832 preserve_generation=False,
1833 timeout=_DEFAULT_TIMEOUT,
1834 if_generation_match=None,
1835 if_generation_not_match=None,
1836 if_metageneration_match=None,
1837 if_metageneration_not_match=None,
1838 retry=DEFAULT_RETRY,
1839 ):
1840 """Deletes a list of blobs from the current bucket.
1841
1842 Uses :meth:`delete_blob` to delete each individual blob.
1843
1844 By default, any generation information in the list of blobs is ignored, and the
live versions of all blobs are deleted. Set ``preserve_generation`` to True
if blob generation should instead be propagated from the list of blobs.
1847
1848 If :attr:`user_project` is set, bills the API request to that project.
1849
1850 :type blobs: list
1851 :param blobs: A list of :class:`~google.cloud.storage.blob.Blob`-s or
1852 blob names to delete.
1853
1854 :type on_error: callable
1855 :param on_error: (Optional) Takes single argument: ``blob``.
1856 Called once for each blob raising
1857 :class:`~google.cloud.exceptions.NotFound`;
1858 otherwise, the exception is propagated.
1859 Note that ``on_error`` is not supported in a ``Batch`` context.
1860
1861 :type client: :class:`~google.cloud.storage.client.Client`
1862 :param client: (Optional) The client to use. If not passed, falls back
1863 to the ``client`` stored on the current bucket.
1864
1865 :type preserve_generation: bool
1866 :param preserve_generation: (Optional) Deletes only the generation specified on the blob object,
instead of the live version, if set to True. Only :class:`~google.cloud.storage.blob.Blob`
objects can have their generation set in this way.
1869 Default: False.
1870
1871 :type if_generation_match: list of long
1872 :param if_generation_match:
1873 (Optional) See :ref:`using-if-generation-match`
The list must match ``blobs`` item-to-item.
1876
1877 :type if_generation_not_match: list of long
1878 :param if_generation_not_match:
1879 (Optional) See :ref:`using-if-generation-not-match`
1880 The list must match ``blobs`` item-to-item.
1881
1882 :type if_metageneration_match: list of long
1883 :param if_metageneration_match:
1884 (Optional) See :ref:`using-if-metageneration-match`
1885 The list must match ``blobs`` item-to-item.
1886
1887 :type if_metageneration_not_match: list of long
1888 :param if_metageneration_not_match:
1889 (Optional) See :ref:`using-if-metageneration-not-match`
1890 The list must match ``blobs`` item-to-item.
1891
1892 :type timeout: float or tuple
1893 :param timeout:
1894 (Optional) The amount of time, in seconds, to wait
1895 for the server response. See: :ref:`configuring_timeouts`
1896
1897 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1898 :param retry: (Optional) How to retry the RPC. A None value will disable
1899 retries. A google.api_core.retry.Retry value will enable retries,
1900 and the object will define retriable response codes and errors and
1901 configure backoff and timeout options.
1902
1903 A google.cloud.storage.retry.ConditionalRetryPolicy value wraps a
1904 Retry object and activates it only if certain conditions are met.
1905 This class exists to provide safe defaults for RPC calls that are
1906 not technically safe to retry normally (due to potential data
1907 duplication or other side-effects) but become safe to retry if a
1908 condition such as if_generation_match is set.
1909
1910 See the retry.py source code and docstrings in this package
1911 (google.cloud.storage.retry) for information on retry types and how
1912 to configure them.
1913
1914 :raises: :class:`~google.cloud.exceptions.NotFound` (if
1915 `on_error` is not passed).
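
Example (illustrative; names are hypothetical and an authenticated
client is assumed):

>>> from google.cloud import storage
>>> client = storage.Client()
>>> bucket = client.get_bucket("my-bucket")
>>> bucket.delete_blobs(
...     ["stale-1.txt", "stale-2.txt"],
...     on_error=lambda blob: None,  # ignore blobs that are already gone
... )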
1916 """
1917 with create_trace_span(name="Storage.Bucket.deleteBlobs"):
1918 _raise_if_len_differs(
1919 len(blobs),
1920 if_generation_match=if_generation_match,
1921 if_generation_not_match=if_generation_not_match,
1922 if_metageneration_match=if_metageneration_match,
1923 if_metageneration_not_match=if_metageneration_not_match,
1924 )
1925 if_generation_match = iter(if_generation_match or [])
1926 if_generation_not_match = iter(if_generation_not_match or [])
1927 if_metageneration_match = iter(if_metageneration_match or [])
1928 if_metageneration_not_match = iter(if_metageneration_not_match or [])
1929
1930 for blob in blobs:
1931 try:
1932 blob_name = blob
1933 generation = None
1934 if not isinstance(blob_name, str):
1935 blob_name = blob.name
1936 generation = blob.generation if preserve_generation else None
1937
1938 self.delete_blob(
1939 blob_name,
1940 client=client,
1941 generation=generation,
1942 if_generation_match=next(if_generation_match, None),
1943 if_generation_not_match=next(if_generation_not_match, None),
1944 if_metageneration_match=next(if_metageneration_match, None),
1945 if_metageneration_not_match=next(
1946 if_metageneration_not_match, None
1947 ),
1948 timeout=timeout,
1949 retry=retry,
1950 )
1951 except NotFound:
1952 if on_error is not None:
1953 on_error(blob)
1954 else:
1955 raise
1956
1957 def copy_blob(
1958 self,
1959 blob,
1960 destination_bucket,
1961 new_name=None,
1962 client=None,
1963 preserve_acl=True,
1964 source_generation=None,
1965 if_generation_match=None,
1966 if_generation_not_match=None,
1967 if_metageneration_match=None,
1968 if_metageneration_not_match=None,
1969 if_source_generation_match=None,
1970 if_source_generation_not_match=None,
1971 if_source_metageneration_match=None,
1972 if_source_metageneration_not_match=None,
1973 timeout=_DEFAULT_TIMEOUT,
1974 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
1975 ):
1976 """Copy the given blob to the given bucket, optionally with a new name.
1977
1978 If :attr:`user_project` is set, bills the API request to that project.
1979
1980 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/objects/copy)
1981 and a [code sample](https://cloud.google.com/storage/docs/samples/storage-copy-file#storage_copy_file-python).
1982
1983 :type blob: :class:`google.cloud.storage.blob.Blob`
1984 :param blob: The blob to be copied.
1985
1986 :type destination_bucket: :class:`google.cloud.storage.bucket.Bucket`
1987 :param destination_bucket: The bucket into which the blob should be
1988 copied.
1989
1990 :type new_name: str
1991 :param new_name: (Optional) The new name for the copied file.
1992
1993 :type client: :class:`~google.cloud.storage.client.Client` or
1994 ``NoneType``
1995 :param client: (Optional) The client to use. If not passed, falls back
1996 to the ``client`` stored on the current bucket.
1997
1998 :type preserve_acl: bool
1999 :param preserve_acl: DEPRECATED. This argument is not functional!
2000 (Optional) Copies ACL from old blob to new blob.
2001 Default: True.
2002 Note that ``preserve_acl`` is not supported in a
2003 ``Batch`` context.
2004
2005 :type source_generation: long
2006 :param source_generation: (Optional) The generation of the blob to be
2007 copied.
2008
2009 :type if_generation_match: long
2010 :param if_generation_match:
2011 (Optional) See :ref:`using-if-generation-match`
2012 Note that the generation to be matched is that of the
2013 ``destination`` blob.
2014
2015 :type if_generation_not_match: long
2016 :param if_generation_not_match:
2017 (Optional) See :ref:`using-if-generation-not-match`
2018 Note that the generation to be matched is that of the
2019 ``destination`` blob.
2020
2021 :type if_metageneration_match: long
2022 :param if_metageneration_match:
2023 (Optional) See :ref:`using-if-metageneration-match`
2024 Note that the metageneration to be matched is that of the
2025 ``destination`` blob.
2026
2027 :type if_metageneration_not_match: long
2028 :param if_metageneration_not_match:
2029 (Optional) See :ref:`using-if-metageneration-not-match`
2030 Note that the metageneration to be matched is that of the
2031 ``destination`` blob.
2032
2033 :type if_source_generation_match: long
2034 :param if_source_generation_match:
2035 (Optional) Makes the operation conditional on whether the source
2036 object's generation matches the given value.
2037
2038 :type if_source_generation_not_match: long
2039 :param if_source_generation_not_match:
2040 (Optional) Makes the operation conditional on whether the source
2041 object's generation does not match the given value.
2042
2043 :type if_source_metageneration_match: long
2044 :param if_source_metageneration_match:
2045 (Optional) Makes the operation conditional on whether the source
2046 object's current metageneration matches the given value.
2047
2048 :type if_source_metageneration_not_match: long
2049 :param if_source_metageneration_not_match:
2050 (Optional) Makes the operation conditional on whether the source
2051 object's current metageneration does not match the given value.
2052
2053 :type timeout: float or tuple
2054 :param timeout:
2055 (Optional) The amount of time, in seconds, to wait
2056 for the server response. See: :ref:`configuring_timeouts`
2057
2058 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
2059 :param retry:
2060 (Optional) How to retry the RPC.
2061 The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, a conditional retry
2062 policy which will only enable retries if ``if_generation_match`` or ``generation``
2063 is set, in order to ensure requests are idempotent before retrying them.
2064 Change the value to ``DEFAULT_RETRY`` or another `google.api_core.retry.Retry` object
2065 to enable retries regardless of generation precondition setting.
2066 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout).
2067
2068 :rtype: :class:`google.cloud.storage.blob.Blob`
2069 :returns: The new Blob.
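
Example (illustrative; bucket and object names are hypothetical):

>>> from google.cloud import storage
>>> client = storage.Client()
>>> source_bucket = client.get_bucket("source-bucket")
>>> destination_bucket = client.get_bucket("destination-bucket")
>>> blob = source_bucket.blob("file.txt")
>>> new_blob = source_bucket.copy_blob(
...     blob, destination_bucket, "copied-file.txt"
... )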
2070 """
2071 with create_trace_span(name="Storage.Bucket.copyBlob"):
2072 client = self._require_client(client)
2073 query_params = {}
2074
2075 if self.user_project is not None:
2076 query_params["userProject"] = self.user_project
2077
2078 if source_generation is not None:
2079 query_params["sourceGeneration"] = source_generation
2080
2081 _add_generation_match_parameters(
2082 query_params,
2083 if_generation_match=if_generation_match,
2084 if_generation_not_match=if_generation_not_match,
2085 if_metageneration_match=if_metageneration_match,
2086 if_metageneration_not_match=if_metageneration_not_match,
2087 if_source_generation_match=if_source_generation_match,
2088 if_source_generation_not_match=if_source_generation_not_match,
2089 if_source_metageneration_match=if_source_metageneration_match,
2090 if_source_metageneration_not_match=if_source_metageneration_not_match,
2091 )
2092
2093 if new_name is None:
2094 new_name = blob.name
2095
2096 new_blob = Blob(bucket=destination_bucket, name=new_name)
2097 api_path = blob.path + "/copyTo" + new_blob.path
2098 copy_result = client._post_resource(
2099 api_path,
2100 None,
2101 query_params=query_params,
2102 timeout=timeout,
2103 retry=retry,
2104 _target_object=new_blob,
2105 )
2106
2107 if not preserve_acl:
2108 new_blob.acl.save(acl={}, client=client, timeout=timeout)
2109
2110 new_blob._set_properties(copy_result)
2111 return new_blob
2112
2113 def rename_blob(
2114 self,
2115 blob,
2116 new_name,
2117 client=None,
2118 if_generation_match=None,
2119 if_generation_not_match=None,
2120 if_metageneration_match=None,
2121 if_metageneration_not_match=None,
2122 if_source_generation_match=None,
2123 if_source_generation_not_match=None,
2124 if_source_metageneration_match=None,
2125 if_source_metageneration_not_match=None,
2126 timeout=_DEFAULT_TIMEOUT,
2127 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
2128 ):
2129 """Rename the given blob using copy and delete operations.
2130
2131 If :attr:`user_project` is set, bills the API request to that project.
2132
Effectively, copies the blob to the same bucket with a new name, then
2134 deletes the blob.
2135
2136 .. warning::
2137
2138 This method will first duplicate the data and then delete the
old blob. This means that with very large objects renaming
could be a (temporarily) costly or a slow operation.
2141 If you need more control over the copy and deletion, instead
2142 use ``google.cloud.storage.blob.Blob.copy_to`` and
2143 ``google.cloud.storage.blob.Blob.delete`` directly.
2144
2145 Also note that this method is not fully supported in a
2146 ``Batch`` context.
2147
2148 :type blob: :class:`google.cloud.storage.blob.Blob`
2149 :param blob: The blob to be renamed.
2150
2151 :type new_name: str
2152 :param new_name: The new name for this blob.
2153
2154 :type client: :class:`~google.cloud.storage.client.Client` or
2155 ``NoneType``
2156 :param client: (Optional) The client to use. If not passed, falls back
2157 to the ``client`` stored on the current bucket.
2158
2159 :type if_generation_match: long
2160 :param if_generation_match:
2161 (Optional) See :ref:`using-if-generation-match`
2162 Note that the generation to be matched is that of the
2163 ``destination`` blob.
2164
2165 :type if_generation_not_match: long
2166 :param if_generation_not_match:
2167 (Optional) See :ref:`using-if-generation-not-match`
2168 Note that the generation to be matched is that of the
2169 ``destination`` blob.
2170
2171 :type if_metageneration_match: long
2172 :param if_metageneration_match:
2173 (Optional) See :ref:`using-if-metageneration-match`
2174 Note that the metageneration to be matched is that of the
2175 ``destination`` blob.
2176
2177 :type if_metageneration_not_match: long
2178 :param if_metageneration_not_match:
2179 (Optional) See :ref:`using-if-metageneration-not-match`
2180 Note that the metageneration to be matched is that of the
2181 ``destination`` blob.
2182
2183 :type if_source_generation_match: long
2184 :param if_source_generation_match:
2185 (Optional) Makes the operation conditional on whether the source
2186 object's generation matches the given value. Also used in the
2187 (implied) delete request.
2188
2189 :type if_source_generation_not_match: long
2190 :param if_source_generation_not_match:
2191 (Optional) Makes the operation conditional on whether the source
2192 object's generation does not match the given value. Also used in
2193 the (implied) delete request.
2194
2195 :type if_source_metageneration_match: long
2196 :param if_source_metageneration_match:
2197 (Optional) Makes the operation conditional on whether the source
2198 object's current metageneration matches the given value. Also used
2199 in the (implied) delete request.
2200
2201 :type if_source_metageneration_not_match: long
2202 :param if_source_metageneration_not_match:
2203 (Optional) Makes the operation conditional on whether the source
2204 object's current metageneration does not match the given value.
2205 Also used in the (implied) delete request.
2206
2207 :type timeout: float or tuple
2208 :param timeout:
2209 (Optional) The amount of time, in seconds, to wait
2210 for the server response. See: :ref:`configuring_timeouts`
2211
2212 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
2213 :param retry:
2214 (Optional) How to retry the RPC.
2215 The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, a conditional retry
2216 policy which will only enable retries if ``if_generation_match`` or ``generation``
2217 is set, in order to ensure requests are idempotent before retrying them.
2218 Change the value to ``DEFAULT_RETRY`` or another `google.api_core.retry.Retry` object
2219 to enable retries regardless of generation precondition setting.
2220 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout).
2221
2222 :rtype: :class:`Blob`
2223 :returns: The newly-renamed blob.
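
Example (illustrative; names are hypothetical). Note the two
underlying requests: a copy followed by a delete.

>>> from google.cloud import storage
>>> client = storage.Client()
>>> bucket = client.get_bucket("my-bucket")
>>> blob = bucket.blob("old-name.txt")
>>> new_blob = bucket.rename_blob(blob, "new-name.txt")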
2224 """
2225 with create_trace_span(name="Storage.Bucket.renameBlob"):
2226 same_name = blob.name == new_name
2227
2228 new_blob = self.copy_blob(
2229 blob,
2230 self,
2231 new_name,
2232 client=client,
2233 timeout=timeout,
2234 if_generation_match=if_generation_match,
2235 if_generation_not_match=if_generation_not_match,
2236 if_metageneration_match=if_metageneration_match,
2237 if_metageneration_not_match=if_metageneration_not_match,
2238 if_source_generation_match=if_source_generation_match,
2239 if_source_generation_not_match=if_source_generation_not_match,
2240 if_source_metageneration_match=if_source_metageneration_match,
2241 if_source_metageneration_not_match=if_source_metageneration_not_match,
2242 retry=retry,
2243 )
2244
2245 if not same_name:
2246 blob.delete(
2247 client=client,
2248 timeout=timeout,
2249 if_generation_match=if_source_generation_match,
2250 if_generation_not_match=if_source_generation_not_match,
2251 if_metageneration_match=if_source_metageneration_match,
2252 if_metageneration_not_match=if_source_metageneration_not_match,
2253 retry=retry,
2254 )
2255 return new_blob
2256
2257 def move_blob(
2258 self,
2259 blob,
2260 new_name,
2261 client=None,
2262 if_generation_match=None,
2263 if_generation_not_match=None,
2264 if_metageneration_match=None,
2265 if_metageneration_not_match=None,
2266 if_source_generation_match=None,
2267 if_source_generation_not_match=None,
2268 if_source_metageneration_match=None,
2269 if_source_metageneration_not_match=None,
2270 timeout=_DEFAULT_TIMEOUT,
2271 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
2272 ):
2273 """Move a blob to a new name atomically.
2274
2275 If :attr:`user_project` is set on the bucket, bills the API request to that project.
2276
2277 :type blob: :class:`google.cloud.storage.blob.Blob`
2278 :param blob: The blob to be renamed.
2279
2280 :type new_name: str
2281 :param new_name: The new name for this blob.
2282
2283 :type client: :class:`~google.cloud.storage.client.Client` or
2284 ``NoneType``
2285 :param client: (Optional) The client to use. If not passed, falls back
2286 to the ``client`` stored on the current bucket.
2287
2288 :type if_generation_match: int
2289 :param if_generation_match:
2290 (Optional) See :ref:`using-if-generation-match`
2291 Note that the generation to be matched is that of the
2292 ``destination`` blob.
2293
2294 :type if_generation_not_match: int
2295 :param if_generation_not_match:
2296 (Optional) See :ref:`using-if-generation-not-match`
2297 Note that the generation to be matched is that of the
2298 ``destination`` blob.
2299
2300 :type if_metageneration_match: int
2301 :param if_metageneration_match:
2302 (Optional) See :ref:`using-if-metageneration-match`
2303 Note that the metageneration to be matched is that of the
2304 ``destination`` blob.
2305
2306 :type if_metageneration_not_match: int
2307 :param if_metageneration_not_match:
2308 (Optional) See :ref:`using-if-metageneration-not-match`
2309 Note that the metageneration to be matched is that of the
2310 ``destination`` blob.
2311
2312 :type if_source_generation_match: int
2313 :param if_source_generation_match:
2314 (Optional) Makes the operation conditional on whether the source
2315 object's generation matches the given value.
2316
2317 :type if_source_generation_not_match: int
2318 :param if_source_generation_not_match:
2319 (Optional) Makes the operation conditional on whether the source
2320 object's generation does not match the given value.
2321
2322 :type if_source_metageneration_match: int
2323 :param if_source_metageneration_match:
2324 (Optional) Makes the operation conditional on whether the source
2325 object's current metageneration matches the given value.
2326
2327 :type if_source_metageneration_not_match: int
2328 :param if_source_metageneration_not_match:
2329 (Optional) Makes the operation conditional on whether the source
2330 object's current metageneration does not match the given value.
2331
2332 :type timeout: float or tuple
2333 :param timeout:
2334 (Optional) The amount of time, in seconds, to wait
2335 for the server response. See: :ref:`configuring_timeouts`
2336
:type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
2338 :param retry:
2339 (Optional) How to retry the RPC.
2340 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout).
2341
2342 :rtype: :class:`Blob`
2343 :returns: The newly-moved blob.
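
Example (illustrative; names are hypothetical). Unlike
:meth:`rename_blob`, this issues a single atomic request:

>>> from google.cloud import storage
>>> client = storage.Client()
>>> bucket = client.get_bucket("my-bucket")
>>> blob = bucket.blob("old-name.txt")
>>> new_blob = bucket.move_blob(blob, "new-name.txt")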
2344 """
2345 with create_trace_span(name="Storage.Bucket.moveBlob"):
2346 client = self._require_client(client)
2347 query_params = {}
2348
2349 if self.user_project is not None:
2350 query_params["userProject"] = self.user_project
2351
2352 _add_generation_match_parameters(
2353 query_params,
2354 if_generation_match=if_generation_match,
2355 if_generation_not_match=if_generation_not_match,
2356 if_metageneration_match=if_metageneration_match,
2357 if_metageneration_not_match=if_metageneration_not_match,
2358 if_source_generation_match=if_source_generation_match,
2359 if_source_generation_not_match=if_source_generation_not_match,
2360 if_source_metageneration_match=if_source_metageneration_match,
2361 if_source_metageneration_not_match=if_source_metageneration_not_match,
2362 )
2363
2364 new_blob = Blob(bucket=self, name=new_name)
2365 api_path = blob.path + "/moveTo/o/" + new_blob.name
2366 move_result = client._post_resource(
2367 api_path,
2368 None,
2369 query_params=query_params,
2370 timeout=timeout,
2371 retry=retry,
2372 _target_object=new_blob,
2373 )
2374
2375 new_blob._set_properties(move_result)
2376 return new_blob
2377
2378 def restore_blob(
2379 self,
2380 blob_name,
2381 client=None,
2382 generation=None,
2383 copy_source_acl=None,
2384 projection=None,
2385 if_generation_match=None,
2386 if_generation_not_match=None,
2387 if_metageneration_match=None,
2388 if_metageneration_not_match=None,
2389 timeout=_DEFAULT_TIMEOUT,
2390 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
2391 ):
2392 """Restores a soft-deleted object.
2393
2394 If :attr:`user_project` is set on the bucket, bills the API request to that project.
2395
2396 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/objects/restore)
2397
2398 :type blob_name: str
2399 :param blob_name: The name of the blob to be restored.
2400
2401 :type client: :class:`~google.cloud.storage.client.Client`
2402 :param client: (Optional) The client to use. If not passed, falls back
2403 to the ``client`` stored on the current bucket.
2404
2405 :type generation: int
2406 :param generation: Selects the specific revision of the object.
2407
2408 :type copy_source_acl: bool
2409 :param copy_source_acl: (Optional) If true, copy the soft-deleted object's access controls.
2410
2411 :type projection: str
2412 :param projection: (Optional) Specifies the set of properties to return.
2413 If used, must be 'full' or 'noAcl'.
2414
2415 :type if_generation_match: long
2416 :param if_generation_match:
2417 (Optional) See :ref:`using-if-generation-match`
2418
2419 :type if_generation_not_match: long
2420 :param if_generation_not_match:
2421 (Optional) See :ref:`using-if-generation-not-match`
2422
2423 :type if_metageneration_match: long
2424 :param if_metageneration_match:
2425 (Optional) See :ref:`using-if-metageneration-match`
2426
2427 :type if_metageneration_not_match: long
2428 :param if_metageneration_not_match:
2429 (Optional) See :ref:`using-if-metageneration-not-match`
2430
2431 :type timeout: float or tuple
2432 :param timeout:
2433 (Optional) The amount of time, in seconds, to wait
2434 for the server response. See: :ref:`configuring_timeouts`
2435
2436 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
2437 :param retry:
2438 (Optional) How to retry the RPC.
The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, meaning
that only restore operations with ``if_generation_match`` or ``generation``
set will be retried.
2442
2443 Users can configure non-default retry behavior. A ``None`` value will
2444 disable retries. A ``DEFAULT_RETRY`` value will enable retries
2445 even if restore operations are not guaranteed to be idempotent.
2446 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout).
2447
2448 :rtype: :class:`google.cloud.storage.blob.Blob`
2449 :returns: The restored Blob.
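
Example (illustrative; names are hypothetical; the soft-deleted
generation is discovered via a ``soft_deleted`` listing):

>>> from google.cloud import storage
>>> client = storage.Client()
>>> bucket = client.get_bucket("my-bucket")
>>> for blob in bucket.list_blobs(soft_deleted=True):
...     if blob.name == "deleted-file.txt":
...         bucket.restore_blob(blob.name, generation=blob.generation)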
2450 """
2451 with create_trace_span(name="Storage.Bucket.restore_blob"):
2452 client = self._require_client(client)
2453 query_params = {}
2454
2455 if self.user_project is not None:
2456 query_params["userProject"] = self.user_project
2457 if generation is not None:
2458 query_params["generation"] = generation
2459 if copy_source_acl is not None:
2460 query_params["copySourceAcl"] = copy_source_acl
2461 if projection is not None:
2462 query_params["projection"] = projection
2463
2464 _add_generation_match_parameters(
2465 query_params,
2466 if_generation_match=if_generation_match,
2467 if_generation_not_match=if_generation_not_match,
2468 if_metageneration_match=if_metageneration_match,
2469 if_metageneration_not_match=if_metageneration_not_match,
2470 )
2471
2472 blob = Blob(bucket=self, name=blob_name)
2473 api_response = client._post_resource(
2474 f"{blob.path}/restore",
2475 None,
2476 query_params=query_params,
2477 timeout=timeout,
2478 retry=retry,
2479 )
2480 blob._set_properties(api_response)
2481 return blob
2482
2483 @property
2484 def cors(self):
2485 """Retrieve or set CORS policies configured for this bucket.
2486
2487 See http://www.w3.org/TR/cors/ and
2488 https://cloud.google.com/storage/docs/json_api/v1/buckets
2489
2490 .. note::
2491
2492 The getter for this property returns a list which contains
2493 *copies* of the bucket's CORS policy mappings. Mutating the list
2494 or one of its dicts has no effect unless you then re-assign the
2495 dict via the setter. E.g.:
2496
2497 >>> policies = bucket.cors
2498 >>> policies.append({'origin': '/foo', ...})
2499 >>> policies[1]['maxAgeSeconds'] = 3600
2500 >>> del policies[0]
2501 >>> bucket.cors = policies
2502 >>> bucket.update()
2503
2504 :setter: Set CORS policies for this bucket.
2505 :getter: Gets the CORS policies for this bucket.
2506
2507 :rtype: list of dictionaries
2508 :returns: A sequence of mappings describing each CORS policy.
2509 """
2510 return [copy.deepcopy(policy) for policy in self._properties.get("cors", ())]
2511
2512 @cors.setter
2513 def cors(self, entries):
2514 """Set CORS policies configured for this bucket.
2515
2516 See http://www.w3.org/TR/cors/ and
2517 https://cloud.google.com/storage/docs/json_api/v1/buckets
2518
2519 :type entries: list of dictionaries
2520 :param entries: A sequence of mappings describing each CORS policy.
2521 """
2522 self._patch_property("cors", entries)
2523
2524 default_event_based_hold = _scalar_property("defaultEventBasedHold")
"""Are uploaded objects automatically placed under an event-based hold?

If True, uploaded objects will be placed under an event-based hold to
be released at a future time. When released, an object will then begin
the retention period determined by the retention policy set on the
object's bucket.
2531
2532 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2533
2534 If the property is not set locally, returns ``None``.
2535
2536 :rtype: bool or ``NoneType``
2537 """
2538
2539 @property
2540 def default_kms_key_name(self):
2541 """Retrieve / set default KMS encryption key for objects in the bucket.
2542
2543 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2544
2545 :setter: Set default KMS encryption key for items in this bucket.
2546 :getter: Get default KMS encryption key for items in this bucket.
2547
2548 :rtype: str
2549 :returns: Default KMS encryption key, or ``None`` if not set.
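
Example (illustrative; the key resource name is a hypothetical
placeholder):

>>> from google.cloud import storage
>>> client = storage.Client()
>>> bucket = client.get_bucket("my-bucket")
>>> bucket.default_kms_key_name = (
...     "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key"
... )
>>> bucket.patch()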
2550 """
2551 encryption_config = self._properties.get("encryption", {})
2552 return encryption_config.get("defaultKmsKeyName")
2553
2554 @default_kms_key_name.setter
2555 def default_kms_key_name(self, value):
2556 """Set default KMS encryption key for objects in the bucket.
2557
2558 :type value: str or None
2559 :param value: new KMS key name (None to clear any existing key).
2560 """
2561 encryption_config = self._properties.get("encryption", {})
2562 encryption_config["defaultKmsKeyName"] = value
2563 self._patch_property("encryption", encryption_config)
2564
2565 @property
2566 def labels(self):
2567 """Retrieve or set labels assigned to this bucket.
2568
2569 See
2570 https://cloud.google.com/storage/docs/json_api/v1/buckets#labels
2571
2572 .. note::
2573
2574 The getter for this property returns a dict which is a *copy*
2575 of the bucket's labels. Mutating that dict has no effect unless
2576 you then re-assign the dict via the setter. E.g.:
2577
2578 >>> labels = bucket.labels
2579 >>> labels['new_key'] = 'some-label'
2580 >>> del labels['old_key']
2581 >>> bucket.labels = labels
2582 >>> bucket.update()
2583
2584 :setter: Set labels for this bucket.
2585 :getter: Gets the labels for this bucket.
2586
2587 :rtype: :class:`dict`
2588 :returns: Name-value pairs (string->string) labelling the bucket.
2589 """
2590 labels = self._properties.get("labels")
2591 if labels is None:
2592 return {}
2593 return copy.deepcopy(labels)
2594
2595 @labels.setter
2596 def labels(self, mapping):
2597 """Set labels assigned to this bucket.
2598
2599 See
2600 https://cloud.google.com/storage/docs/json_api/v1/buckets#labels
2601
2602 :type mapping: :class:`dict`
2603 :param mapping: Name-value pairs (string->string) labelling the bucket.
2604 """
2605 # If any labels have been expressly removed, we need to track this
2606 # so that a future .patch() call can do the correct thing.
2607 existing = set([k for k in self.labels.keys()])
2608 incoming = set([k for k in mapping.keys()])
2609 self._label_removals = self._label_removals.union(existing.difference(incoming))
2610 mapping = {k: str(v) for k, v in mapping.items()}
2611
2612 # Actually update the labels on the object.
2613 self._patch_property("labels", copy.deepcopy(mapping))
2614
2615 @property
2616 def etag(self):
2617 """Retrieve the ETag for the bucket.
2618
2619 See https://tools.ietf.org/html/rfc2616#section-3.11 and
2620 https://cloud.google.com/storage/docs/json_api/v1/buckets
2621
2622 :rtype: str or ``NoneType``
2623 :returns: The bucket etag or ``None`` if the bucket's
2624 resource has not been loaded from the server.
2625 """
2626 return self._properties.get("etag")
2627
2628 @property
2629 def id(self):
2630 """Retrieve the ID for the bucket.
2631
2632 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2633
2634 :rtype: str or ``NoneType``
2635 :returns: The ID of the bucket or ``None`` if the bucket's
2636 resource has not been loaded from the server.
2637 """
2638 return self._properties.get("id")
2639
2640 @property
2641 def iam_configuration(self):
2642 """Retrieve IAM configuration for this bucket.
2643
2644 :rtype: :class:`IAMConfiguration`
2645 :returns: an instance for managing the bucket's IAM configuration.
2646 """
2647 info = self._properties.get("iamConfiguration", {})
2648 return IAMConfiguration.from_api_repr(info, self)
2649
2650 @property
2651 def soft_delete_policy(self):
2652 """Retrieve the soft delete policy for this bucket.
2653
2654 See https://cloud.google.com/storage/docs/soft-delete
2655
2656 :rtype: :class:`SoftDeletePolicy`
2657 :returns: an instance for managing the bucket's soft delete policy.
2658 """
2659 policy = self._properties.get("softDeletePolicy", {})
2660 return SoftDeletePolicy.from_api_repr(policy, self)
2661
2662 @property
2663 def lifecycle_rules(self):
2664 """Retrieve or set lifecycle rules configured for this bucket.
2665
2666 See https://cloud.google.com/storage/docs/lifecycle and
2667 https://cloud.google.com/storage/docs/json_api/v1/buckets
2668
2669 .. note::
2670
2671 The getter for this property returns a generator which yields
2672 *copies* of the bucket's lifecycle rules mappings. Mutating the
2673 output dicts has no effect unless you then re-assign the dict via
2674 the setter. E.g.:
2675
2676 >>> rules = list(bucket.lifecycle_rules)
>>> rules.append({'action': {'type': 'Delete'}, 'condition': {'age': 30}})
>>> rules[1]['action']['type'] = 'Delete'
2679 >>> del rules[0]
2680 >>> bucket.lifecycle_rules = rules
2681 >>> bucket.update()
2682
2683 :setter: Set lifecycle rules for this bucket.
2684 :getter: Gets the lifecycle rules for this bucket.
2685
2686 :rtype: generator(dict)
2687 :returns: A sequence of mappings describing each lifecycle rule.
2688 """
2689 info = self._properties.get("lifecycle", {})
2690 for rule in info.get("rule", ()):
2691 action_type = rule["action"]["type"]
2692 if action_type == "Delete":
2693 yield LifecycleRuleDelete.from_api_repr(rule)
2694 elif action_type == "SetStorageClass":
2695 yield LifecycleRuleSetStorageClass.from_api_repr(rule)
2696 elif action_type == "AbortIncompleteMultipartUpload":
2697 yield LifecycleRuleAbortIncompleteMultipartUpload.from_api_repr(rule)
2698 else:
2699 warnings.warn(
2700 "Unknown lifecycle rule type received: {}. Please upgrade to the latest version of google-cloud-storage.".format(
2701 rule
2702 ),
2703 UserWarning,
2704 stacklevel=1,
2705 )
2706
2707 @lifecycle_rules.setter
2708 def lifecycle_rules(self, rules):
2709 """Set lifecycle rules configured for this bucket.
2710
2711 See https://cloud.google.com/storage/docs/lifecycle and
2712 https://cloud.google.com/storage/docs/json_api/v1/buckets
2713
2714 :type rules: list of dictionaries
2715 :param rules: A sequence of mappings describing each lifecycle rule.
2716 """
2717 rules = [dict(rule) for rule in rules] # Convert helpers if needed
2718 self._patch_property("lifecycle", {"rule": rules})
2719
2720 def clear_lifecycle_rules(self):
2721 """Clear lifecycle rules configured for this bucket.
2722
2723 See https://cloud.google.com/storage/docs/lifecycle and
2724 https://cloud.google.com/storage/docs/json_api/v1/buckets
2725 """
2726 self.lifecycle_rules = []
2727
2728 def clear_lifecyle_rules(self):
"""Deprecated alias for :meth:`clear_lifecycle_rules` (the misspelled name is kept for backwards compatibility)."""
2730 return self.clear_lifecycle_rules()
2731
2732 def add_lifecycle_delete_rule(self, **kw):
2733 """Add a "delete" rule to lifecycle rules configured for this bucket.
2734
2735 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle),
2736 which is set on the bucket. For the general format of a lifecycle configuration, see the
2737 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets).
2738 See also a [code sample](https://cloud.google.com/storage/docs/samples/storage-enable-bucket-lifecycle-management#storage_enable_bucket_lifecycle_management-python).
2739
2740 :type kw: dict
:param kw: arguments passed to :class:`LifecycleRuleConditions`.
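
Example (a minimal sketch; the bucket name is hypothetical and the
``age`` condition is measured in days):

>>> from google.cloud import storage
>>> client = storage.Client()
>>> bucket = client.get_bucket("my-bucket")
>>> bucket.add_lifecycle_delete_rule(age=30)
>>> bucket.patch()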
2742 """
2743 rules = list(self.lifecycle_rules)
2744 rules.append(LifecycleRuleDelete(**kw))
2745 self.lifecycle_rules = rules
2746
2747 def add_lifecycle_set_storage_class_rule(self, storage_class, **kw):
2748 """Add a "set storage class" rule to lifecycle rules.
2749
2750 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle),
2751 which is set on the bucket. For the general format of a lifecycle configuration, see the
2752 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets).
2753
2754 :type storage_class: str, one of :attr:`STORAGE_CLASSES`.
2755 :param storage_class: new storage class to assign to matching items.
2756
2757 :type kw: dict
:param kw: arguments passed to :class:`LifecycleRuleConditions`.
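
Example (a minimal sketch; the bucket name is hypothetical):

>>> from google.cloud import storage
>>> from google.cloud.storage.constants import COLDLINE_STORAGE_CLASS
>>> client = storage.Client()
>>> bucket = client.get_bucket("my-bucket")
>>> bucket.add_lifecycle_set_storage_class_rule(COLDLINE_STORAGE_CLASS, age=365)
>>> bucket.patch()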
2759 """
2760 rules = list(self.lifecycle_rules)
2761 rules.append(LifecycleRuleSetStorageClass(storage_class, **kw))
2762 self.lifecycle_rules = rules
2763
2764 def add_lifecycle_abort_incomplete_multipart_upload_rule(self, **kw):
"""Add an "abort incomplete multipart upload" rule to lifecycle rules.
2766
2767 .. note::
2768 The "age" lifecycle condition is the only supported condition
2769 for this rule.
2770
2771 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle),
2772 which is set on the bucket. For the general format of a lifecycle configuration, see the
2773 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets).
2774
2775 :type kw: dict
:param kw: arguments passed to :class:`LifecycleRuleConditions`.
2777 """
2778 rules = list(self.lifecycle_rules)
2779 rules.append(LifecycleRuleAbortIncompleteMultipartUpload(**kw))
2780 self.lifecycle_rules = rules
2781
2782 _location = _scalar_property("location")
2783
2784 @property
2785 def location(self):
2786 """Retrieve location configured for this bucket.
2787
2788 See https://cloud.google.com/storage/docs/json_api/v1/buckets and
2789 https://cloud.google.com/storage/docs/locations
2790
2791 Returns ``None`` if the property has not been set before creation,
or if the bucket's resource has not been loaded from the server.

:rtype: str or ``NoneType``
2794 """
2795 return self._location
2796
2797 @location.setter
2798 def location(self, value):
2799 """(Deprecated) Set `Bucket.location`
2800
2801 This can only be set at bucket **creation** time.
2802
2803 See https://cloud.google.com/storage/docs/json_api/v1/buckets and
2804 https://cloud.google.com/storage/docs/bucket-locations
2805
2806 .. warning::
2807
2808 Assignment to 'Bucket.location' is deprecated, as it is only
2809 valid before the bucket is created. Instead, pass the location
2810 to `Bucket.create`.
2811 """
2812 warnings.warn(_LOCATION_SETTER_MESSAGE, DeprecationWarning, stacklevel=2)
2813 self._location = value
2814
2815 @property
2816 def data_locations(self):
2817 """Retrieve the list of regional locations for custom dual-region buckets.
2818
2819 See https://cloud.google.com/storage/docs/json_api/v1/buckets and
2820 https://cloud.google.com/storage/docs/locations
2821
2822 Returns ``None`` if the property has not been set before creation,
2823 if the bucket's resource has not been loaded from the server,
or if the bucket is not a dual-region bucket.

:rtype: list of str or ``NoneType``
2826 """
2827 custom_placement_config = self._properties.get("customPlacementConfig", {})
2828 return custom_placement_config.get("dataLocations")
2829
2830 @property
2831 def location_type(self):
2832 """Retrieve the location type for the bucket.
2833
2834 See https://cloud.google.com/storage/docs/storage-classes
2835
:getter: Gets the location type for this bucket.
2837
2838 :rtype: str or ``NoneType``
2839 :returns:
2840 If set, one of
2841 :attr:`~google.cloud.storage.constants.MULTI_REGION_LOCATION_TYPE`,
2842 :attr:`~google.cloud.storage.constants.REGION_LOCATION_TYPE`, or
2843 :attr:`~google.cloud.storage.constants.DUAL_REGION_LOCATION_TYPE`,
2844 else ``None``.
2845 """
2846 return self._properties.get("locationType")
2847
2848 def get_logging(self):
2849 """Return info about access logging for this bucket.
2850
2851 See https://cloud.google.com/storage/docs/access-logs#status
2852
2853 :rtype: dict or None
:returns: a dict with keys ``logBucket`` and ``logObjectPrefix``
(if logging is enabled), or None (if not).
2856 """
2857 info = self._properties.get("logging")
2858 return copy.deepcopy(info)
2859
2860 def enable_logging(self, bucket_name, object_prefix=""):
2861 """Enable access logging for this bucket.
2862
2863 See https://cloud.google.com/storage/docs/access-logs
2864
2865 :type bucket_name: str
2866 :param bucket_name: name of bucket in which to store access logs
2867
2868 :type object_prefix: str
2869 :param object_prefix: prefix for access log filenames
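
Example (a minimal sketch; both bucket names are hypothetical; the
change is persisted with ``patch()``):

>>> from google.cloud import storage
>>> client = storage.Client()
>>> bucket = client.get_bucket("my-bucket")
>>> bucket.enable_logging("my-log-bucket", object_prefix="access-logs")
>>> bucket.patch()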
2870 """
2871 info = {"logBucket": bucket_name, "logObjectPrefix": object_prefix}
2872 self._patch_property("logging", info)
2873
2874 def disable_logging(self):
2875 """Disable access logging for this bucket.
2876
2877 See https://cloud.google.com/storage/docs/access-logs#disabling
2878 """
2879 self._patch_property("logging", None)
2880
2881 @property
2882 def metageneration(self):
2883 """Retrieve the metageneration for the bucket.
2884
2885 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2886
2887 :rtype: int or ``NoneType``
2888 :returns: The metageneration of the bucket or ``None`` if the bucket's
2889 resource has not been loaded from the server.
2890 """
2891 metageneration = self._properties.get("metageneration")
2892 if metageneration is not None:
2893 return int(metageneration)
2894
2895 @property
2896 def owner(self):
2897 """Retrieve info about the owner of the bucket.
2898
2899 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2900
2901 :rtype: dict or ``NoneType``
2902 :returns: Mapping of owner's role/ID. Returns ``None`` if the bucket's
2903 resource has not been loaded from the server.
2904 """
2905 return copy.deepcopy(self._properties.get("owner"))
2906
2907 @property
2908 def project_number(self):
2909 """Retrieve the number of the project to which the bucket is assigned.
2910
2911 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2912
2913 :rtype: int or ``NoneType``
2914 :returns: The project number that owns the bucket or ``None`` if
2915 the bucket's resource has not been loaded from the server.
2916 """
2917 project_number = self._properties.get("projectNumber")
2918 if project_number is not None:
2919 return int(project_number)
2920
2921 @property
2922 def retention_policy_effective_time(self):
2923 """Retrieve the effective time of the bucket's retention policy.
2924
2925 :rtype: datetime.datetime or ``NoneType``
2926 :returns: point-in time at which the bucket's retention policy is
2927 effective, or ``None`` if the property is not
2928 set locally.
2929 """
2930 policy = self._properties.get("retentionPolicy")
2931 if policy is not None:
2932 timestamp = policy.get("effectiveTime")
2933 if timestamp is not None:
2934 return _rfc3339_nanos_to_datetime(timestamp)
2935
2936 @property
2937 def retention_policy_locked(self):
"""Retrieve whether the bucket's retention policy is locked.
2939
2940 :rtype: bool
2941 :returns: True if the bucket's policy is locked, or else False
2942 if the policy is not locked, or the property is not
2943 set locally.
2944 """
2945 policy = self._properties.get("retentionPolicy")
2946 if policy is not None:
2947 return policy.get("isLocked")
2948
2949 @property
2950 def retention_period(self):
2951 """Retrieve or set the retention period for items in the bucket.
2952
2953 :rtype: int or ``NoneType``
2954 :returns: number of seconds to retain items after upload or release
2955 from event-based lock, or ``None`` if the property is not
2956 set locally.
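
Example (a minimal sketch; the bucket name is hypothetical and the
bucket's retention policy is assumed not to be locked):

>>> from google.cloud import storage
>>> client = storage.Client()
>>> bucket = client.get_bucket("my-bucket")
>>> bucket.retention_period = 7 * 24 * 60 * 60  # retain for seven days
>>> bucket.patch()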
2957 """
2958 policy = self._properties.get("retentionPolicy")
2959 if policy is not None:
2960 period = policy.get("retentionPeriod")
2961 if period is not None:
2962 return int(period)
2963
2964 @retention_period.setter
2965 def retention_period(self, value):
2966 """Set the retention period for items in the bucket.
2967
2968 :type value: int
2969 :param value:
2970 number of seconds to retain items after upload or release from
2971 event-based lock.
2972
2973 :raises ValueError: if the bucket's retention policy is locked.
2974 """
2975 policy = self._properties.setdefault("retentionPolicy", {})
2976 if value is not None:
2977 policy["retentionPeriod"] = str(value)
2978 else:
2979 policy = None
2980 self._patch_property("retentionPolicy", policy)
2981
2982 @property
2983 def self_link(self):
2984 """Retrieve the URI for the bucket.
2985
2986 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2987
2988 :rtype: str or ``NoneType``
2989 :returns: The self link for the bucket or ``None`` if
2990 the bucket's resource has not been loaded from the server.
2991 """
2992 return self._properties.get("selfLink")
2993
2994 @property
2995 def storage_class(self):
2996 """Retrieve or set the storage class for the bucket.
2997
2998 See https://cloud.google.com/storage/docs/storage-classes
2999
3000 :setter: Set the storage class for this bucket.
:getter: Gets the storage class for this bucket.
3002
3003 :rtype: str or ``NoneType``
3004 :returns:
3005 If set, one of
3006 :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`,
3007 :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`,
3008 :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`,
3009 :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`,
3010 :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`,
3011 :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`,
3012 or
3013 :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`,
3014 else ``None``.
3015 """
3016 return self._properties.get("storageClass")
3017
3018 @storage_class.setter
3019 def storage_class(self, value):
3020 """Set the storage class for the bucket.
3021
3022 See https://cloud.google.com/storage/docs/storage-classes
3023
3024 :type value: str
3025 :param value:
3026 One of
3027 :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`,
3028 :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`,
3029 :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`,
3030 :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`,
3031 :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`,
3032 :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`,
3033 or
3034 :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`,
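
Example (a minimal sketch; the bucket name is hypothetical):

>>> from google.cloud import storage
>>> from google.cloud.storage.constants import ARCHIVE_STORAGE_CLASS
>>> client = storage.Client()
>>> bucket = client.get_bucket("my-bucket")
>>> bucket.storage_class = ARCHIVE_STORAGE_CLASS
>>> bucket.patch()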
3035 """
3036 self._patch_property("storageClass", value)
3037
3038 @property
3039 def time_created(self):
3040 """Retrieve the timestamp at which the bucket was created.
3041
3042 See https://cloud.google.com/storage/docs/json_api/v1/buckets
3043
3044 :rtype: :class:`datetime.datetime` or ``NoneType``
3045 :returns: Datetime object parsed from RFC3339 valid timestamp, or
3046 ``None`` if the bucket's resource has not been loaded
3047 from the server.
3048 """
3049 value = self._properties.get("timeCreated")
3050 if value is not None:
3051 return _rfc3339_nanos_to_datetime(value)
3052
3053 @property
3054 def updated(self):
3055 """Retrieve the timestamp at which the bucket was last updated.
3056
3057 See https://cloud.google.com/storage/docs/json_api/v1/buckets
3058
3059 :rtype: :class:`datetime.datetime` or ``NoneType``
3060 :returns: Datetime object parsed from RFC3339 valid timestamp, or
3061 ``None`` if the bucket's resource has not been loaded
3062 from the server.
3063 """
3064 value = self._properties.get("updated")
3065 if value is not None:
3066 return _rfc3339_nanos_to_datetime(value)
3067
3068 @property
3069 def versioning_enabled(self):
3070 """Is versioning enabled for this bucket?
3071
3072 See https://cloud.google.com/storage/docs/object-versioning for
3073 details.
3074
3075 :setter: Update whether versioning is enabled for this bucket.
3076 :getter: Query whether versioning is enabled for this bucket.
3077
3078 :rtype: bool
3079 :returns: True if enabled, else False.
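
Example (a minimal sketch; the bucket name is hypothetical):

>>> from google.cloud import storage
>>> client = storage.Client()
>>> bucket = client.get_bucket("my-bucket")
>>> bucket.versioning_enabled = True
>>> bucket.patch()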
3080 """
3081 versioning = self._properties.get("versioning", {})
3082 return versioning.get("enabled", False)
3083
3084 @versioning_enabled.setter
3085 def versioning_enabled(self, value):
3086 """Enable versioning for this bucket.
3087
3088 See https://cloud.google.com/storage/docs/object-versioning for
3089 details.
3090
3091 :type value: convertible to boolean
3092 :param value: should versioning be enabled for the bucket?
3093 """
3094 self._patch_property("versioning", {"enabled": bool(value)})
3095
3096 @property
3097 def requester_pays(self):
3098 """Does the requester pay for API requests for this bucket?
3099
3100 See https://cloud.google.com/storage/docs/requester-pays for
3101 details.
3102
3103 :setter: Update whether requester pays for this bucket.
3104 :getter: Query whether requester pays for this bucket.
3105
3106 :rtype: bool
3107 :returns: True if requester pays for API requests for the bucket,
3108 else False.
3109 """
billing = self._properties.get("billing", {})
return billing.get("requesterPays", False)
3112
3113 @requester_pays.setter
3114 def requester_pays(self, value):
3115 """Update whether requester pays for API requests for this bucket.
3116
3117 See https://cloud.google.com/storage/docs/using-requester-pays for
3118 details.
3119
3120 :type value: convertible to boolean
3121 :param value: should requester pay for API requests for the bucket?
3122 """
3123 self._patch_property("billing", {"requesterPays": bool(value)})
3124
3125 @property
3126 def autoclass_enabled(self):
3127 """Whether Autoclass is enabled for this bucket.
3128
3129 See https://cloud.google.com/storage/docs/using-autoclass for details.
3130
3131 :setter: Update whether autoclass is enabled for this bucket.
3132 :getter: Query whether autoclass is enabled for this bucket.
3133
3134 :rtype: bool
3135 :returns: True if enabled, else False.
3136 """
3137 autoclass = self._properties.get("autoclass", {})
3138 return autoclass.get("enabled", False)
3139
3140 @autoclass_enabled.setter
3141 def autoclass_enabled(self, value):
3142 """Enable or disable Autoclass at the bucket-level.
3143
3144 See https://cloud.google.com/storage/docs/using-autoclass for details.
3145
3146 :type value: convertible to boolean
3147 :param value: If true, enable Autoclass for this bucket.
3148 If false, disable Autoclass for this bucket.
3149 """
3150 autoclass = self._properties.get("autoclass", {})
3151 autoclass["enabled"] = bool(value)
3152 self._patch_property("autoclass", autoclass)
3153
3154 @property
3155 def autoclass_toggle_time(self):
"""Retrieve the toggle time when Autoclass was last enabled or disabled for the bucket.

:rtype: datetime.datetime or ``NoneType``
:returns: point-in-time at which the bucket's Autoclass setting was last toggled, or ``None`` if the property is not set locally.
3159 """
3160 autoclass = self._properties.get("autoclass")
3161 if autoclass is not None:
3162 timestamp = autoclass.get("toggleTime")
3163 if timestamp is not None:
3164 return _rfc3339_nanos_to_datetime(timestamp)
3165
3166 @property
3167 def autoclass_terminal_storage_class(self):
3168 """The storage class that objects in an Autoclass bucket eventually transition to if
3169 they are not read for a certain length of time. Valid values are NEARLINE and ARCHIVE.
3170
3171 See https://cloud.google.com/storage/docs/using-autoclass for details.
3172
3173 :setter: Set the terminal storage class for Autoclass configuration.
3174 :getter: Get the terminal storage class for Autoclass configuration.
3175
        :rtype: str or ``NoneType``
        :returns: The terminal storage class if one is set, else ``None``.
3178 """
3179 autoclass = self._properties.get("autoclass", {})
3180 return autoclass.get("terminalStorageClass", None)
3181
3182 @autoclass_terminal_storage_class.setter
3183 def autoclass_terminal_storage_class(self, value):
3184 """The storage class that objects in an Autoclass bucket eventually transition to if
3185 they are not read for a certain length of time. Valid values are NEARLINE and ARCHIVE.
3186
3187 See https://cloud.google.com/storage/docs/using-autoclass for details.
3188
3189 :type value: str
        :param value: The only valid values are ``"NEARLINE"`` and ``"ARCHIVE"``.
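
        A sketch (the ``"ARCHIVE"`` choice is illustrative):

        .. code-block:: python

            bucket.autoclass_enabled = True
            bucket.autoclass_terminal_storage_class = "ARCHIVE"
            bucket.patch()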
3191 """
3192 autoclass = self._properties.get("autoclass", {})
3193 autoclass["terminalStorageClass"] = value
3194 self._patch_property("autoclass", autoclass)
3195
3196 @property
3197 def autoclass_terminal_storage_class_update_time(self):
        """Retrieve the time at which the Autoclass ``terminal_storage_class`` was last updated for this bucket.

        :rtype: datetime.datetime or ``NoneType``
        :returns: point in time at which the bucket's ``terminal_storage_class``
                  was last updated, or ``None`` if the property is not set locally.
3201 """
3202 autoclass = self._properties.get("autoclass")
3203 if autoclass is not None:
3204 timestamp = autoclass.get("terminalStorageClassUpdateTime")
3205 if timestamp is not None:
3206 return _rfc3339_nanos_to_datetime(timestamp)
3207
3208 @property
3209 def object_retention_mode(self):
3210 """Retrieve the object retention mode set on the bucket.
3211
        :rtype: str or ``NoneType``
        :returns: The object retention mode. When set to ``"Enabled"``,
                  retention configurations can be set on objects in the
                  bucket. ``None`` if object retention is not configured.
3215 """
3216 object_retention = self._properties.get("objectRetention")
3217 if object_retention is not None:
3218 return object_retention.get("mode")
3219
3220 @property
3221 def hierarchical_namespace_enabled(self):
3222 """Whether hierarchical namespace is enabled for this bucket.
3223
3224 :setter: Update whether hierarchical namespace is enabled for this bucket.
3225 :getter: Query whether hierarchical namespace is enabled for this bucket.
3226
        :rtype: bool or ``NoneType``
        :returns: True if enabled, else False; ``None`` if the property is not
                  set on the bucket resource.
3229 """
3230 hns = self._properties.get("hierarchicalNamespace", {})
3231 return hns.get("enabled")
3232
3233 @hierarchical_namespace_enabled.setter
3234 def hierarchical_namespace_enabled(self, value):
        """Enable or disable hierarchical namespace at the bucket level.
3236
3237 :type value: convertible to boolean
3238 :param value: If true, enable hierarchical namespace for this bucket.
3239 If false, disable hierarchical namespace for this bucket.
3240
3241 .. note::
3242 To enable hierarchical namespace, you must set it at bucket creation time.
3243 Currently, hierarchical namespace configuration cannot be changed after bucket creation.
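
        A creation-time sketch (the bucket name is a placeholder, and
        ``client`` is an assumed :class:`~google.cloud.storage.client.Client`;
        hierarchical namespace buckets also require uniform bucket-level
        access):

        .. code-block:: python

            bucket = client.bucket("my-hns-bucket")
            bucket.hierarchical_namespace_enabled = True
            bucket.iam_configuration.uniform_bucket_level_access_enabled = True
            client.create_bucket(bucket)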
3244 """
3245 hns = self._properties.get("hierarchicalNamespace", {})
3246 hns["enabled"] = bool(value)
3247 self._patch_property("hierarchicalNamespace", hns)
3248
3249 def configure_website(self, main_page_suffix=None, not_found_page=None):
3250 """Configure website-related properties.
3251
3252 See https://cloud.google.com/storage/docs/static-website
3253
3254 .. note::
            This configures the bucket's website-related properties, controlling
            how the service behaves when accessing bucket contents as a website.
3257 See [tutorials](https://cloud.google.com/storage/docs/hosting-static-website) and
3258 [code samples](https://cloud.google.com/storage/docs/samples/storage-define-bucket-website-configuration#storage_define_bucket_website_configuration-python)
3259 for more information.
3260
3261 :type main_page_suffix: str
3262 :param main_page_suffix: The page to use as the main page
3263 of a directory.
3264 Typically something like index.html.
3265
3266 :type not_found_page: str
3267 :param not_found_page: The file to use when a page isn't found.
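
        For example (a sketch; the change is staged locally until ``patch``
        is called):

        .. code-block:: python

            bucket.configure_website("index.html", "404.html")
            bucket.patch()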
3268 """
3269 data = {
3270 "mainPageSuffix": main_page_suffix,
3271 "notFoundPage": not_found_page,
3272 }
3273 self._patch_property("website", data)
3274
3275 def disable_website(self):
3276 """Disable the website configuration for this bucket.
3277
3278 This is really just a shortcut for setting the website-related
3279 attributes to ``None``.
3280 """
3281 return self.configure_website(None, None)
3282
3283 def get_iam_policy(
3284 self,
3285 client=None,
3286 requested_policy_version=None,
3287 timeout=_DEFAULT_TIMEOUT,
3288 retry=DEFAULT_RETRY,
3289 ):
3290 """Retrieve the IAM policy for the bucket.
3291
3292 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy)
3293 and a [code sample](https://cloud.google.com/storage/docs/samples/storage-view-bucket-iam-members#storage_view_bucket_iam_members-python).
3294
3295 If :attr:`user_project` is set, bills the API request to that project.
3296
3297 :type client: :class:`~google.cloud.storage.client.Client` or
3298 ``NoneType``
3299 :param client: (Optional) The client to use. If not passed, falls back
3300 to the ``client`` stored on the current bucket.
3301
3302 :type requested_policy_version: int or ``NoneType``
3303 :param requested_policy_version: (Optional) The version of IAM policies to request.
3304 If a policy with a condition is requested without
3305 setting this, the server will return an error.
3306 This must be set to a value of 3 to retrieve IAM
3307 policies containing conditions. This is to prevent
3308 client code that isn't aware of IAM conditions from
3309 interpreting and modifying policies incorrectly.
3310 The service might return a policy with version lower
3311 than the one that was requested, based on the
3312 feature syntax in the policy fetched.
3313
3314 :type timeout: float or tuple
3315 :param timeout:
3316 (Optional) The amount of time, in seconds, to wait
3317 for the server response. See: :ref:`configuring_timeouts`
3318
3319 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3320 :param retry:
3321 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3322
3323 :rtype: :class:`google.api_core.iam.Policy`
3324 :returns: the policy instance, based on the resource returned from
3325 the ``getIamPolicy`` API request.
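
        For example, to list members by role (a sketch; version 3 is requested
        in case the policy carries IAM conditions):

        .. code-block:: python

            policy = bucket.get_iam_policy(requested_policy_version=3)
            for binding in policy.bindings:
                print(binding["role"], sorted(binding["members"]))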
3326 """
3327 with create_trace_span(name="Storage.Bucket.getIamPolicy"):
3328 client = self._require_client(client)
3329 query_params = {}
3330
3331 if self.user_project is not None:
3332 query_params["userProject"] = self.user_project
3333
3334 if requested_policy_version is not None:
3335 query_params["optionsRequestedPolicyVersion"] = requested_policy_version
3336
3337 info = client._get_resource(
3338 f"{self.path}/iam",
3339 query_params=query_params,
3340 timeout=timeout,
3341 retry=retry,
3342 _target_object=None,
3343 )
3344 return Policy.from_api_repr(info)
3345
3346 def set_iam_policy(
3347 self,
3348 policy,
3349 client=None,
3350 timeout=_DEFAULT_TIMEOUT,
3351 retry=DEFAULT_RETRY_IF_ETAG_IN_JSON,
3352 ):
3353 """Update the IAM policy for the bucket.
3354
3355 See
3356 https://cloud.google.com/storage/docs/json_api/v1/buckets/setIamPolicy
3357
3358 If :attr:`user_project` is set, bills the API request to that project.
3359
3360 :type policy: :class:`google.api_core.iam.Policy`
3361 :param policy: policy instance used to update bucket's IAM policy.
3362
3363 :type client: :class:`~google.cloud.storage.client.Client` or
3364 ``NoneType``
3365 :param client: (Optional) The client to use. If not passed, falls back
3366 to the ``client`` stored on the current bucket.
3367
3368 :type timeout: float or tuple
3369 :param timeout:
3370 (Optional) The amount of time, in seconds, to wait
3371 for the server response. See: :ref:`configuring_timeouts`
3372
3373 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3374 :param retry:
3375 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3376
3377 :rtype: :class:`google.api_core.iam.Policy`
3378 :returns: the policy instance, based on the resource returned from
3379 the ``setIamPolicy`` API request.
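
        For example, to grant a role via read-modify-write (a sketch; the
        role and member shown are placeholders):

        .. code-block:: python

            policy = bucket.get_iam_policy(requested_policy_version=3)
            policy.bindings.append(
                {
                    "role": "roles/storage.objectViewer",
                    "members": {"user:user@example.com"},
                }
            )
            bucket.set_iam_policy(policy)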
3380 """
3381 with create_trace_span(name="Storage.Bucket.setIamPolicy"):
3382 client = self._require_client(client)
3383 query_params = {}
3384
3385 if self.user_project is not None:
3386 query_params["userProject"] = self.user_project
3387
3388 path = f"{self.path}/iam"
3389 resource = policy.to_api_repr()
3390 resource["resourceId"] = self.path
3391
3392 info = client._put_resource(
3393 path,
3394 resource,
3395 query_params=query_params,
3396 timeout=timeout,
3397 retry=retry,
3398 _target_object=None,
3399 )
3400
3401 return Policy.from_api_repr(info)
3402
3403 def test_iam_permissions(
3404 self,
3405 permissions,
3406 client=None,
3407 timeout=_DEFAULT_TIMEOUT,
3408 retry=DEFAULT_RETRY,
3409 ):
3410 """API call: test permissions
3411
3412 See
3413 https://cloud.google.com/storage/docs/json_api/v1/buckets/testIamPermissions
3414
3415 If :attr:`user_project` is set, bills the API request to that project.
3416
3417 :type permissions: list of string
3418 :param permissions: the permissions to check
3419
3420 :type client: :class:`~google.cloud.storage.client.Client` or
3421 ``NoneType``
3422 :param client: (Optional) The client to use. If not passed, falls back
3423 to the ``client`` stored on the current bucket.
3424
3425 :type timeout: float or tuple
3426 :param timeout:
3427 (Optional) The amount of time, in seconds, to wait
3428 for the server response. See: :ref:`configuring_timeouts`
3429
3430 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3431 :param retry:
3432 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3433
3434 :rtype: list of string
3435 :returns: the permissions returned by the ``testIamPermissions`` API
3436 request.
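
        For example (a sketch; the permission names shown are standard Cloud
        Storage IAM permissions):

        .. code-block:: python

            granted = bucket.test_iam_permissions(
                ["storage.buckets.get", "storage.objects.list"]
            )
            if "storage.objects.list" in granted:
                print("Caller may list objects in this bucket.")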
3437 """
3438 with create_trace_span(name="Storage.Bucket.testIamPermissions"):
3439 client = self._require_client(client)
3440 query_params = {"permissions": permissions}
3441
3442 if self.user_project is not None:
3443 query_params["userProject"] = self.user_project
3444
3445 path = f"{self.path}/iam/testPermissions"
3446 resp = client._get_resource(
3447 path,
3448 query_params=query_params,
3449 timeout=timeout,
3450 retry=retry,
3451 _target_object=None,
3452 )
3453 return resp.get("permissions", [])
3454
3455 def make_public(
3456 self,
3457 recursive=False,
3458 future=False,
3459 client=None,
3460 timeout=_DEFAULT_TIMEOUT,
3461 if_metageneration_match=None,
3462 if_metageneration_not_match=None,
3463 retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
3464 ):
3465 """Update bucket's ACL, granting read access to anonymous users.
3466
3467 :type recursive: bool
3468 :param recursive: If True, this will make all blobs inside the bucket
3469 public as well.
3470
3471 :type future: bool
3472 :param future: If True, this will make all objects created in the
3473 future public as well.
3474
3475 :type client: :class:`~google.cloud.storage.client.Client` or
3476 ``NoneType``
3477 :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
3480 :param timeout:
3481 (Optional) The amount of time, in seconds, to wait
3482 for the server response. See: :ref:`configuring_timeouts`
3483
        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.
3491
3492 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3493 :param retry:
3494 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3495
3496 :raises ValueError:
3497 If ``recursive`` is True, and the bucket contains more than 256
3498 blobs. This is to prevent extremely long runtime of this
3499 method. For such buckets, iterate over the blobs returned by
3500 :meth:`list_blobs` and call
3501 :meth:`~google.cloud.storage.blob.Blob.make_public`
3502 for each blob.
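
        A sketch making the bucket, its current objects, and future objects
        publicly readable (note that ACL updates fail on buckets with uniform
        bucket-level access, which disables ACLs):

        .. code-block:: python

            bucket.make_public(recursive=True, future=True)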
3503 """
3504 with create_trace_span(name="Storage.Bucket.makePublic"):
3505 self.acl.all().grant_read()
3506 self.acl.save(
3507 client=client,
3508 timeout=timeout,
3509 if_metageneration_match=if_metageneration_match,
3510 if_metageneration_not_match=if_metageneration_not_match,
3511 retry=retry,
3512 )
3513
3514 if future:
3515 doa = self.default_object_acl
3516 if not doa.loaded:
3517 doa.reload(client=client, timeout=timeout)
3518 doa.all().grant_read()
3519 doa.save(
3520 client=client,
3521 timeout=timeout,
3522 if_metageneration_match=if_metageneration_match,
3523 if_metageneration_not_match=if_metageneration_not_match,
3524 retry=retry,
3525 )
3526
3527 if recursive:
3528 blobs = list(
3529 self.list_blobs(
3530 projection="full",
3531 max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
3532 client=client,
3533 timeout=timeout,
3534 )
3535 )
3536 if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
3537 message = (
3538 "Refusing to make public recursively with more than "
3539 "%d objects. If you actually want to make every object "
3540 "in this bucket public, iterate through the blobs "
3541 "returned by 'Bucket.list_blobs()' and call "
3542 "'make_public' on each one."
3543 ) % (self._MAX_OBJECTS_FOR_ITERATION,)
3544 raise ValueError(message)
3545
3546 for blob in blobs:
3547 blob.acl.all().grant_read()
3548 blob.acl.save(
3549 client=client,
3550 timeout=timeout,
3551 )
3552
3553 def make_private(
3554 self,
3555 recursive=False,
3556 future=False,
3557 client=None,
3558 timeout=_DEFAULT_TIMEOUT,
3559 if_metageneration_match=None,
3560 if_metageneration_not_match=None,
3561 retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
3562 ):
3563 """Update bucket's ACL, revoking read access for anonymous users.
3564
3565 :type recursive: bool
3566 :param recursive: If True, this will make all blobs inside the bucket
3567 private as well.
3568
3569 :type future: bool
3570 :param future: If True, this will make all objects created in the
3571 future private as well.
3572
3573 :type client: :class:`~google.cloud.storage.client.Client` or
3574 ``NoneType``
3575 :param client: (Optional) The client to use. If not passed, falls back
3576 to the ``client`` stored on the current bucket.
3577
3578 :type timeout: float or tuple
3579 :param timeout:
3580 (Optional) The amount of time, in seconds, to wait
3581 for the server response. See: :ref:`configuring_timeouts`
3582
        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3590 :param retry:
3591 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3592
3593 :raises ValueError:
3594 If ``recursive`` is True, and the bucket contains more than 256
3595 blobs. This is to prevent extremely long runtime of this
3596 method. For such buckets, iterate over the blobs returned by
3597 :meth:`list_blobs` and call
3598 :meth:`~google.cloud.storage.blob.Blob.make_private`
3599 for each blob.
3600 """
3601 with create_trace_span(name="Storage.Bucket.makePrivate"):
3602 self.acl.all().revoke_read()
3603 self.acl.save(
3604 client=client,
3605 timeout=timeout,
3606 if_metageneration_match=if_metageneration_match,
3607 if_metageneration_not_match=if_metageneration_not_match,
3608 retry=retry,
3609 )
3610
3611 if future:
3612 doa = self.default_object_acl
3613 if not doa.loaded:
3614 doa.reload(client=client, timeout=timeout)
3615 doa.all().revoke_read()
3616 doa.save(
3617 client=client,
3618 timeout=timeout,
3619 if_metageneration_match=if_metageneration_match,
3620 if_metageneration_not_match=if_metageneration_not_match,
3621 retry=retry,
3622 )
3623
3624 if recursive:
3625 blobs = list(
3626 self.list_blobs(
3627 projection="full",
3628 max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
3629 client=client,
3630 timeout=timeout,
3631 )
3632 )
3633 if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
3634 message = (
3635 "Refusing to make private recursively with more than "
3636 "%d objects. If you actually want to make every object "
3637 "in this bucket private, iterate through the blobs "
3638 "returned by 'Bucket.list_blobs()' and call "
3639 "'make_private' on each one."
3640 ) % (self._MAX_OBJECTS_FOR_ITERATION,)
3641 raise ValueError(message)
3642
3643 for blob in blobs:
3644 blob.acl.all().revoke_read()
3645 blob.acl.save(client=client, timeout=timeout)
3646
3647 def generate_upload_policy(self, conditions, expiration=None, client=None):
3648 """Create a signed upload policy for uploading objects.
3649
3650 This method generates and signs a policy document. You can use
3651 [`policy documents`](https://cloud.google.com/storage/docs/xml-api/post-object-forms)
3652 to allow visitors to a website to upload files to
3653 Google Cloud Storage without giving them direct write access.
3654 See a [code sample](https://cloud.google.com/storage/docs/xml-api/post-object-forms#python).
3655
        :type conditions: list
        :param conditions: A list of conditions as described in the
                           `policy documents` documentation.

        :type expiration: datetime
        :param expiration: (Optional) Expiration in UTC. If not specified, the
                           policy will expire in 1 hour.
3663
3664 :type client: :class:`~google.cloud.storage.client.Client`
3665 :param client: (Optional) The client to use. If not passed, falls back
3666 to the ``client`` stored on the current bucket.
3667
3668 :rtype: dict
3669 :returns: A dictionary of (form field name, form field value) of form
3670 fields that should be added to your HTML upload form in order
3671 to attach the signature.
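
        A sketch rendering the returned fields as hidden form inputs (the
        ``starts-with`` condition is an illustrative example):

        .. code-block:: python

            fields = bucket.generate_upload_policy(
                conditions=[["starts-with", "$key", "uploads/"]]
            )
            hidden_inputs = "".join(
                f'<input type="hidden" name="{name}" value="{value}"/>'
                for name, value in fields.items()
            )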
3672 """
3673 client = self._require_client(client)
3674 credentials = client._credentials
3675 _signing.ensure_signed_credentials(credentials)
3676
3677 if expiration is None:
3678 expiration = _NOW(_UTC).replace(tzinfo=None) + datetime.timedelta(hours=1)
3679
3680 conditions = conditions + [{"bucket": self.name}]
3681
3682 policy_document = {
3683 "expiration": _datetime_to_rfc3339(expiration),
3684 "conditions": conditions,
3685 }
3686
3687 encoded_policy_document = base64.b64encode(
3688 json.dumps(policy_document).encode("utf-8")
3689 )
3690 signature = base64.b64encode(credentials.sign_bytes(encoded_policy_document))
3691
3692 fields = {
3693 "bucket": self.name,
3694 "GoogleAccessId": credentials.signer_email,
3695 "policy": encoded_policy_document.decode("utf-8"),
3696 "signature": signature.decode("utf-8"),
3697 }
3698
3699 return fields
3700
3701 def lock_retention_policy(
3702 self, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY
3703 ):
3704 """Lock the bucket's retention policy.
3705
3706 :type client: :class:`~google.cloud.storage.client.Client` or
3707 ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.
3710
3711 :type timeout: float or tuple
3712 :param timeout:
3713 (Optional) The amount of time, in seconds, to wait
3714 for the server response. See: :ref:`configuring_timeouts`
3715
3716 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3717 :param retry:
3718 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3719
3720 :raises ValueError:
3721 if the bucket has no metageneration (i.e., new or never reloaded);
3722 if the bucket has no retention policy assigned;
3723 if the bucket's retention policy is already locked.
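
        A sketch (``reload`` fetches the metageneration that the lock request
        is conditioned on; the bucket is assumed to already have a retention
        policy, and locking is permanent):

        .. code-block:: python

            bucket.reload()  # ensure 'metageneration' and the policy are loaded
            bucket.lock_retention_policy()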
3724 """
3725 with create_trace_span(name="Storage.Bucket.lockRetentionPolicy"):
            if "metageneration" not in self._properties:
                raise ValueError(
                    "Bucket has no metageneration: try 'reload'?"
                )
3730
3731 policy = self._properties.get("retentionPolicy")
3732
3733 if policy is None:
3734 raise ValueError(
3735 "Bucket has no retention policy assigned: try 'reload'?"
3736 )
3737
3738 if policy.get("isLocked"):
3739 raise ValueError("Bucket's retention policy is already locked.")
3740
3741 client = self._require_client(client)
3742
3743 query_params = {"ifMetagenerationMatch": self.metageneration}
3744
3745 if self.user_project is not None:
3746 query_params["userProject"] = self.user_project
3747
3748 path = f"/b/{self.name}/lockRetentionPolicy"
3749 api_response = client._post_resource(
3750 path,
3751 None,
3752 query_params=query_params,
3753 timeout=timeout,
3754 retry=retry,
3755 _target_object=self,
3756 )
3757 self._set_properties(api_response)
3758
3759 def generate_signed_url(
3760 self,
3761 expiration=None,
3762 api_access_endpoint=None,
3763 method="GET",
3764 headers=None,
3765 query_parameters=None,
3766 client=None,
3767 credentials=None,
3768 version=None,
3769 virtual_hosted_style=False,
3770 bucket_bound_hostname=None,
3771 scheme="http",
3772 ):
3773 """Generates a signed URL for this bucket.
3774
3775 .. note::
3776
            If you are on Google Compute Engine, you can't generate a signed
            URL using the default GCE service account credentials. To generate
            a signed URL from GCE, use credentials for a standard service
            account loaded from a JSON key file instead.
3781
3782 If you have a bucket that you want to allow access to for a set
3783 amount of time, you can use this method to generate a URL that
3784 is only valid within a certain time period.
3785
        If ``bucket_bound_hostname`` is passed instead of ``api_access_endpoint``,
        ``https`` works only when the hostname is served through a CDN.
3788
3789 :type expiration: Union[Integer, datetime.datetime, datetime.timedelta]
3790 :param expiration: Point in time when the signed URL should expire. If
3791 a ``datetime`` instance is passed without an explicit
3792 ``tzinfo`` set, it will be assumed to be ``UTC``.
3793
3794 :type api_access_endpoint: str
3795 :param api_access_endpoint: (Optional) URI base, for instance
3796 "https://storage.googleapis.com". If not specified, the client's
3797 api_endpoint will be used. Incompatible with bucket_bound_hostname.
3798
3799 :type method: str
3800 :param method: The HTTP verb that will be used when requesting the URL.
3801
3802 :type headers: dict
3803 :param headers:
3804 (Optional) Additional HTTP headers to be included as part of the
3805 signed URLs. See:
3806 https://cloud.google.com/storage/docs/xml-api/reference-headers
3807 Requests using the signed URL *must* pass the specified header
3808 (name and value) with each request for the URL.
3809
3810 :type query_parameters: dict
3811 :param query_parameters:
3812 (Optional) Additional query parameters to be included as part of the
3813 signed URLs. See:
3814 https://cloud.google.com/storage/docs/xml-api/reference-headers#query
3815
3816 :type client: :class:`~google.cloud.storage.client.Client` or
3817 ``NoneType``
3818 :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.
3820
3821 :type credentials: :class:`google.auth.credentials.Credentials` or
3822 :class:`NoneType`
3823 :param credentials: The authorization credentials to attach to requests.
3824 These credentials identify this application to the service.
3825 If none are specified, the client will attempt to ascertain
3826 the credentials from the environment.
3827
3828 :type version: str
3829 :param version: (Optional) The version of signed credential to create.
3830 Must be one of 'v2' | 'v4'.
3831
3832 :type virtual_hosted_style: bool
3833 :param virtual_hosted_style:
            (Optional) If true, then construct the URL relative to the bucket's
3835 virtual hostname, e.g., '<bucket-name>.storage.googleapis.com'.
3836 Incompatible with bucket_bound_hostname.
3837
3838 :type bucket_bound_hostname: str
3839 :param bucket_bound_hostname:
3840 (Optional) If passed, then construct the URL relative to the bucket-bound hostname.
            Value can be a bare hostname or include a scheme, e.g., 'example.com' or 'http://example.com'.
3842 Incompatible with api_access_endpoint and virtual_hosted_style.
3843 See: https://cloud.google.com/storage/docs/request-endpoints#cname
3844
3845 :type scheme: str
3846 :param scheme:
3847 (Optional) If ``bucket_bound_hostname`` is passed as a bare hostname, use
3848 this value as the scheme. ``https`` will work only when using a CDN.
3849 Defaults to ``"http"``.
3850
3851 :raises: :exc:`ValueError` when version is invalid or mutually exclusive arguments are used.
3852 :raises: :exc:`TypeError` when expiration is not a valid type.
3853 :raises: :exc:`AttributeError` if credentials is not an instance
3854 of :class:`google.auth.credentials.Signing`.
3855
3856 :rtype: str
3857 :returns: A signed URL you can use to access the resource
3858 until expiration.
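
        For example (a sketch; a version-4 URL, valid for one hour, for
        listing the bucket's contents):

        .. code-block:: python

            import datetime

            url = bucket.generate_signed_url(
                expiration=datetime.timedelta(hours=1),
                method="GET",
                version="v4",
            )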
3859 """
3860 if version is None:
3861 version = "v2"
3862 elif version not in ("v2", "v4"):
3863 raise ValueError("'version' must be either 'v2' or 'v4'")
3864
3865 if (
3866 api_access_endpoint is not None or virtual_hosted_style
3867 ) and bucket_bound_hostname:
3868 raise ValueError(
3869 "The bucket_bound_hostname argument is not compatible with "
3870 "either api_access_endpoint or virtual_hosted_style."
3871 )
3872
3873 if api_access_endpoint is None:
3874 client = self._require_client(client)
3875 api_access_endpoint = client.api_endpoint
3876
3877 # If you are on Google Compute Engine, you can't generate a signed URL
3878 # using GCE service account.
3879 # See https://github.com/googleapis/google-auth-library-python/issues/50
3880 if virtual_hosted_style:
3881 api_access_endpoint = _virtual_hosted_style_base_url(
3882 api_access_endpoint, self.name
3883 )
3884 resource = "/"
3885 elif bucket_bound_hostname:
3886 api_access_endpoint = _bucket_bound_hostname_url(
3887 bucket_bound_hostname, scheme
3888 )
3889 resource = "/"
3890 else:
3891 resource = f"/{self.name}"
3892
3893 if credentials is None:
3894 client = self._require_client(client) # May be redundant, but that's ok.
3895 credentials = client._credentials
3896
3897 if version == "v2":
3898 helper = generate_signed_url_v2
3899 else:
3900 helper = generate_signed_url_v4
3901
3902 return helper(
3903 credentials,
3904 resource=resource,
3905 expiration=expiration,
3906 api_access_endpoint=api_access_endpoint,
3907 method=method.upper(),
3908 headers=headers,
3909 query_parameters=query_parameters,
3910 )
3911
3912 @property
3913 def ip_filter(self):
3914 """Retrieve or set the IP Filter configuration for this bucket.
3915
3916 See https://cloud.google.com/storage/docs/ip-filtering-overview and
3917 https://cloud.google.com/storage/docs/json_api/v1/buckets#ipFilter
3918
3919 .. note::
3920 The getter for this property returns an
3921 :class:`~google.cloud.storage.ip_filter.IPFilter` object, which is a
3922 structured representation of the bucket's IP filter configuration.
3923 Modifying the returned object has no effect. To update the bucket's
3924 IP filter, create and assign a new ``IPFilter`` object to this
3925 property and then call
3926 :meth:`~google.cloud.storage.bucket.Bucket.patch`.
3927
3928 .. code-block:: python
3929
3930 from google.cloud.storage.ip_filter import (
3931 IPFilter,
3932 PublicNetworkSource,
3933 )
3934
3935 ip_filter = IPFilter()
3936 ip_filter.mode = "Enabled"
3937 ip_filter.public_network_source = PublicNetworkSource(
3938 allowed_ip_cidr_ranges=["203.0.113.5/32"]
3939 )
3940 bucket.ip_filter = ip_filter
3941 bucket.patch()
3942
3943 :setter: Set the IP Filter configuration for this bucket.
3944 :getter: Gets the IP Filter configuration for this bucket.
3945
3946 :rtype: :class:`~google.cloud.storage.ip_filter.IPFilter` or ``NoneType``
3947 :returns:
3948 An ``IPFilter`` object representing the configuration, or ``None``
3949 if no filter is configured.
3950 """
3951 resource = self._properties.get(_IP_FILTER_PROPERTY)
3952 if resource:
3953 return IPFilter._from_api_resource(resource)
3954 return None
3955
3956 @ip_filter.setter
3957 def ip_filter(self, value):
3958 if value is None:
3959 self._patch_property(_IP_FILTER_PROPERTY, None)
3960 elif isinstance(value, IPFilter):
3961 self._patch_property(_IP_FILTER_PROPERTY, value._to_api_resource())
3962 else:
3963 self._patch_property(_IP_FILTER_PROPERTY, value)
3964
3965
3966class SoftDeletePolicy(dict):
3967 """Map a bucket's soft delete policy.
3968
3969 See https://cloud.google.com/storage/docs/soft-delete
3970
3971 :type bucket: :class:`Bucket`
3972 :param bucket: Bucket for which this instance is the policy.
3973
3974 :type retention_duration_seconds: int
3975 :param retention_duration_seconds:
3976 (Optional) The period of time in seconds that soft-deleted objects in the bucket
3977 will be retained and cannot be permanently deleted.
3978
3979 :type effective_time: :class:`datetime.datetime`
3980 :param effective_time:
3981 (Optional) When the bucket's soft delete policy is effective.
3982 This value should normally only be set by the back-end API.
3983 """
3984
3985 def __init__(self, bucket, **kw):
3986 data = {}
3987 retention_duration_seconds = kw.get("retention_duration_seconds")
3988 data["retentionDurationSeconds"] = retention_duration_seconds
3989
3990 effective_time = kw.get("effective_time")
3991 if effective_time is not None:
3992 effective_time = _datetime_to_rfc3339(effective_time)
3993 data["effectiveTime"] = effective_time
3994
3995 super().__init__(data)
3996 self._bucket = bucket
3997
3998 @classmethod
3999 def from_api_repr(cls, resource, bucket):
4000 """Factory: construct instance from resource.
4001
4002 :type resource: dict
4003 :param resource: mapping as returned from API call.
4004
4005 :type bucket: :class:`Bucket`
        :param bucket: Bucket for which this instance is the policy.
4007
4008 :rtype: :class:`SoftDeletePolicy`
4009 :returns: Instance created from resource.
4010 """
4011 instance = cls(bucket)
4012 instance.update(resource)
4013 return instance
4014
4015 @property
4016 def bucket(self):
4017 """Bucket for which this instance is the policy.
4018
4019 :rtype: :class:`Bucket`
4020 :returns: the instance's bucket.
4021 """
4022 return self._bucket
4023
4024 @property
4025 def retention_duration_seconds(self):
4026 """Get the retention duration of the bucket's soft delete policy.
4027
4028 :rtype: int or ``NoneType``
4029 :returns: The period of time in seconds that soft-deleted objects in the bucket
4030 will be retained and cannot be permanently deleted; Or ``None`` if the
4031 property is not set.
4032 """
4033 duration = self.get("retentionDurationSeconds")
4034 if duration is not None:
4035 return int(duration)
4036
4037 @retention_duration_seconds.setter
4038 def retention_duration_seconds(self, value):
4039 """Set the retention duration of the bucket's soft delete policy.
4040
4041 :type value: int
4042 :param value:
4043 The period of time in seconds that soft-deleted objects in the bucket
4044 will be retained and cannot be permanently deleted.
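
        A sketch assuming the owning bucket exposes this policy via a
        ``soft_delete_policy`` property (the ten-day duration is arbitrary):

        .. code-block:: python

            policy = bucket.soft_delete_policy
            policy.retention_duration_seconds = 10 * 24 * 60 * 60  # ten days
            bucket.patch()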
4045 """
4046 self["retentionDurationSeconds"] = value
4047 self.bucket._patch_property("softDeletePolicy", self)
4048
4049 @property
4050 def effective_time(self):
4051 """Get the effective time of the bucket's soft delete policy.
4052
4053 :rtype: datetime.datetime or ``NoneType``
        :returns: point in time at which the bucket's soft delete policy is
4055 effective, or ``None`` if the property is not set.
4056 """
4057 timestamp = self.get("effectiveTime")
4058 if timestamp is not None:
4059 return _rfc3339_nanos_to_datetime(timestamp)
4060
4061
4062def _raise_if_len_differs(expected_len, **generation_match_args):
4063 """
4064 Raise an error if any generation match argument
    is set and its length differs from the given value.
4066
4067 :type expected_len: int
4068 :param expected_len: Expected argument length in case it's set.
4069
4070 :type generation_match_args: dict
    :param generation_match_args: Lists whose lengths must be checked.
4072
4073 :raises: :exc:`ValueError` if any argument set, but has an unexpected length.
4074 """
4075 for name, value in generation_match_args.items():
4076 if value is not None and len(value) != expected_len:
4077 raise ValueError(f"'{name}' length must be the same as 'blobs' length")