# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Create / interact with Google Cloud Storage buckets."""

import base64
import copy
import datetime
import json
from urllib.parse import urlsplit
import warnings

from google.api_core import datetime_helpers
from google.cloud._helpers import _datetime_to_rfc3339
from google.cloud._helpers import _rfc3339_nanos_to_datetime
from google.cloud.exceptions import NotFound
from google.api_core.iam import Policy
from google.cloud.storage import _signing
from google.cloud.storage._helpers import _add_etag_match_headers
from google.cloud.storage._helpers import _add_generation_match_parameters
from google.cloud.storage._helpers import _NOW
from google.cloud.storage._helpers import _PropertyMixin
from google.cloud.storage._helpers import _UTC
from google.cloud.storage._helpers import _scalar_property
from google.cloud.storage._helpers import _validate_name
from google.cloud.storage._signing import generate_signed_url_v2
from google.cloud.storage._signing import generate_signed_url_v4
from google.cloud.storage._helpers import _bucket_bound_hostname_url
from google.cloud.storage._helpers import _virtual_hosted_style_base_url
from google.cloud.storage._opentelemetry_tracing import create_trace_span
from google.cloud.storage.acl import BucketACL
from google.cloud.storage.acl import DefaultObjectACL
from google.cloud.storage.blob import Blob
from google.cloud.storage.constants import _DEFAULT_TIMEOUT
from google.cloud.storage.constants import ARCHIVE_STORAGE_CLASS
from google.cloud.storage.constants import COLDLINE_STORAGE_CLASS
from google.cloud.storage.constants import DUAL_REGION_LOCATION_TYPE
from google.cloud.storage.constants import (
    DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS,
)
from google.cloud.storage.constants import MULTI_REGIONAL_LEGACY_STORAGE_CLASS
from google.cloud.storage.constants import MULTI_REGION_LOCATION_TYPE
from google.cloud.storage.constants import NEARLINE_STORAGE_CLASS
from google.cloud.storage.constants import PUBLIC_ACCESS_PREVENTION_INHERITED
from google.cloud.storage.constants import REGIONAL_LEGACY_STORAGE_CLASS
from google.cloud.storage.constants import REGION_LOCATION_TYPE
from google.cloud.storage.constants import STANDARD_STORAGE_CLASS
from google.cloud.storage.ip_filter import IPFilter
from google.cloud.storage.notification import BucketNotification
from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT
from google.cloud.storage.retry import DEFAULT_RETRY
from google.cloud.storage.retry import DEFAULT_RETRY_IF_GENERATION_SPECIFIED
from google.cloud.storage.retry import DEFAULT_RETRY_IF_ETAG_IN_JSON
from google.cloud.storage.retry import DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED


_UBLA_BPO_ENABLED_MESSAGE = (
    "Pass only one of 'uniform_bucket_level_access_enabled' / "
    "'bucket_policy_only_enabled' to 'IAMConfiguration'."
)
_BPO_ENABLED_MESSAGE = (
    "'IAMConfiguration.bucket_policy_only_enabled' is deprecated. "
    "Instead, use 'IAMConfiguration.uniform_bucket_level_access_enabled'."
)
_UBLA_BPO_LOCK_TIME_MESSAGE = (
    "Pass only one of 'uniform_bucket_level_access_lock_time' / "
    "'bucket_policy_only_lock_time' to 'IAMConfiguration'."
)
_BPO_LOCK_TIME_MESSAGE = (
    "'IAMConfiguration.bucket_policy_only_lock_time' is deprecated. "
    "Instead, use 'IAMConfiguration.uniform_bucket_level_access_lock_time'."
)
_LOCATION_SETTER_MESSAGE = (
    "Assignment to 'Bucket.location' is deprecated, as it is only "
    "valid before the bucket is created. Instead, pass the location "
    "to `Bucket.create`."
)
_FROM_STRING_MESSAGE = (
    "Bucket.from_string() is deprecated. Use Bucket.from_uri() instead."
)
_IP_FILTER_PROPERTY = "ipFilter"


def _blobs_page_start(iterator, page, response):
    """Grab prefixes after a :class:`~google.cloud.iterator.Page` started.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type page: :class:`~google.api_core.page_iterator.Page`
    :param page: The page that was just created.

    :type response: dict
    :param response: The JSON API response for a page of blobs.
    """
    page.prefixes = tuple(response.get("prefixes", ()))
    iterator.prefixes.update(page.prefixes)


def _item_to_blob(iterator, item):
    """Convert a JSON blob to the native object.

    .. note::

        This assumes that the ``bucket`` attribute has been
        added to the iterator after being created.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that has retrieved the item.

    :type item: dict
    :param item: An item to be converted to a blob.

    :rtype: :class:`.Blob`
    :returns: The next blob in the page.
    """
    name = item.get("name")
    blob = Blob(name, bucket=iterator.bucket)
    blob._set_properties(item)
    return blob


def _item_to_notification(iterator, item):
    """Convert a JSON resource to the native notification object.

    .. note::

        This assumes that the ``bucket`` attribute has been
        added to the iterator after being created.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that has retrieved the item.

    :type item: dict
    :param item: An item to be converted to a notification.

    :rtype: :class:`.BucketNotification`
    :returns: The next notification being iterated.
    """
    return BucketNotification.from_api_repr(item, bucket=iterator.bucket)


class LifecycleRuleConditions(dict):
    """Map a single lifecycle rule for a bucket.

    See: https://cloud.google.com/storage/docs/lifecycle

    :type age: int
    :param age: (Optional) Apply rule action to items whose age, in days,
                exceeds this value.

    :type created_before: datetime.date
    :param created_before: (Optional) Apply rule action to items created
                           before this date.

    :type is_live: bool
    :param is_live: (Optional) If true, apply rule action to non-versioned
                    items, or to items with no newer versions. If false, apply
                    rule action to versioned items with at least one newer
                    version.

    :type matches_prefix: list(str)
    :param matches_prefix: (Optional) Apply rule action to items whose
                           name begins with any of the given prefixes.

    :type matches_storage_class: list(str), one or more of
                                 :attr:`Bucket.STORAGE_CLASSES`.
    :param matches_storage_class: (Optional) Apply rule action to items
                                  whose storage class matches this value.

    :type matches_suffix: list(str)
    :param matches_suffix: (Optional) Apply rule action to items whose
                           name ends with any of the given suffixes.

    :type number_of_newer_versions: int
    :param number_of_newer_versions: (Optional) Apply rule action to versioned
                                     items having N newer versions.

    :type days_since_custom_time: int
    :param days_since_custom_time: (Optional) Apply rule action to items whose number of
                                   days elapsed since the custom timestamp exceeds this
                                   value. The value of the field must be a non-negative
                                   integer. If it's zero, the object version becomes
                                   eligible for the lifecycle action as soon as its
                                   custom time is set.

    :type custom_time_before: :class:`datetime.date`
    :param custom_time_before: (Optional) Date object parsed from an RFC 3339 date,
                               e.g., 2019-03-16. Apply rule action to items whose
                               custom time is before this date.

    :type days_since_noncurrent_time: int
    :param days_since_noncurrent_time: (Optional) Apply rule action to items whose number of
                                       days elapsed since the noncurrent timestamp exceeds
                                       this value. This condition is relevant only for
                                       versioned objects. The value of the field must be a
                                       non-negative integer. If it's zero, the object
                                       version becomes eligible for the lifecycle action
                                       as soon as it becomes noncurrent.

    :type noncurrent_time_before: :class:`datetime.date`
    :param noncurrent_time_before: (Optional) Date object parsed from an RFC 3339 date,
                                   e.g., 2019-03-16. Apply rule action to items whose
                                   noncurrent time is before this date. This condition
                                   is relevant only for versioned objects.

    :raises ValueError: if no arguments are passed.
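
    Example, a minimal sketch using illustrative values:

    .. code-block:: python

        # Conditions matching objects older than 30 days whose names start
        # with the (illustrative) prefix "logs/".
        conditions = LifecycleRuleConditions(age=30, matches_prefix=["logs/"])
        assert conditions.age == 30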
    """

    def __init__(
        self,
        age=None,
        created_before=None,
        is_live=None,
        matches_storage_class=None,
        number_of_newer_versions=None,
        days_since_custom_time=None,
        custom_time_before=None,
        days_since_noncurrent_time=None,
        noncurrent_time_before=None,
        matches_prefix=None,
        matches_suffix=None,
        _factory=False,
    ):
        conditions = {}

        if age is not None:
            conditions["age"] = age

        if created_before is not None:
            conditions["createdBefore"] = created_before.isoformat()

        if is_live is not None:
            conditions["isLive"] = is_live

        if matches_storage_class is not None:
            conditions["matchesStorageClass"] = matches_storage_class

        if number_of_newer_versions is not None:
            conditions["numNewerVersions"] = number_of_newer_versions

        if days_since_custom_time is not None:
            conditions["daysSinceCustomTime"] = days_since_custom_time

        if custom_time_before is not None:
            conditions["customTimeBefore"] = custom_time_before.isoformat()

        if days_since_noncurrent_time is not None:
            conditions["daysSinceNoncurrentTime"] = days_since_noncurrent_time

        if noncurrent_time_before is not None:
            conditions["noncurrentTimeBefore"] = noncurrent_time_before.isoformat()

        if matches_prefix is not None:
            conditions["matchesPrefix"] = matches_prefix

        if matches_suffix is not None:
            conditions["matchesSuffix"] = matches_suffix

        if not _factory and not conditions:
            raise ValueError("Supply at least one condition")

        super(LifecycleRuleConditions, self).__init__(conditions)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleConditions`
        :returns: Instance created from resource.
        """
        instance = cls(_factory=True)
        instance.update(resource)
        return instance

    @property
    def age(self):
        """Condition's age value."""
        return self.get("age")

    @property
    def created_before(self):
        """Condition's created_before value."""
        before = self.get("createdBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)

    @property
    def is_live(self):
        """Condition's 'is_live' value."""
        return self.get("isLive")

    @property
    def matches_prefix(self):
        """Condition's 'matches_prefix' value."""
        return self.get("matchesPrefix")

    @property
    def matches_storage_class(self):
        """Condition's 'matches_storage_class' value."""
        return self.get("matchesStorageClass")

    @property
    def matches_suffix(self):
        """Condition's 'matches_suffix' value."""
        return self.get("matchesSuffix")

    @property
    def number_of_newer_versions(self):
        """Condition's 'number_of_newer_versions' value."""
        return self.get("numNewerVersions")

    @property
    def days_since_custom_time(self):
        """Condition's 'days_since_custom_time' value."""
        return self.get("daysSinceCustomTime")

    @property
    def custom_time_before(self):
        """Condition's 'custom_time_before' value."""
        before = self.get("customTimeBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)

    @property
    def days_since_noncurrent_time(self):
        """Condition's 'days_since_noncurrent_time' value."""
        return self.get("daysSinceNoncurrentTime")

    @property
    def noncurrent_time_before(self):
        """Condition's 'noncurrent_time_before' value."""
        before = self.get("noncurrentTimeBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)


class LifecycleRuleDelete(dict):
    """Map a lifecycle rule deleting matching items.

    :type kw: dict
    :param kw: arguments passed to :class:`LifecycleRuleConditions`.
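
    Example, a minimal sketch; the rule takes effect only once assigned to a
    bucket's lifecycle rules and patched:

    .. code-block:: python

        # Delete objects more than 365 days old.
        rule = LifecycleRuleDelete(age=365)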
    """

    def __init__(self, **kw):
        conditions = LifecycleRuleConditions(**kw)
        rule = {"action": {"type": "Delete"}, "condition": dict(conditions)}
        super().__init__(rule)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleDelete`
        :returns: Instance created from resource.
        """
        instance = cls(_factory=True)
        instance.update(resource)
        return instance


class LifecycleRuleSetStorageClass(dict):
    """Map a lifecycle rule updating storage class of matching items.

    :type storage_class: str, one of :attr:`Bucket.STORAGE_CLASSES`.
    :param storage_class: new storage class to assign to matching items.

    :type kw: dict
    :param kw: arguments passed to :class:`LifecycleRuleConditions`.
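
    Example, a minimal sketch using one of the module's storage-class
    constants:

    .. code-block:: python

        from google.cloud.storage.constants import COLDLINE_STORAGE_CLASS

        # Move objects more than 90 days old to Coldline.
        rule = LifecycleRuleSetStorageClass(COLDLINE_STORAGE_CLASS, age=90)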
    """

    def __init__(self, storage_class, **kw):
        conditions = LifecycleRuleConditions(**kw)
        rule = {
            "action": {
                "type": "SetStorageClass",
                "storageClass": storage_class,
            },
            "condition": dict(conditions),
        }
        super().__init__(rule)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleSetStorageClass`
        :returns: Instance created from resource.
        """
        action = resource["action"]
        instance = cls(action["storageClass"], _factory=True)
        instance.update(resource)
        return instance


class LifecycleRuleAbortIncompleteMultipartUpload(dict):
    """Map a rule aborting incomplete multipart uploads of matching items.

    The "age" lifecycle condition is the only supported condition for this rule.

    :type kw: dict
    :param kw: arguments passed to :class:`LifecycleRuleConditions`.
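
    Example, a minimal sketch:

    .. code-block:: python

        # Abort multipart uploads still incomplete after 7 days.
        rule = LifecycleRuleAbortIncompleteMultipartUpload(age=7)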
    """

    def __init__(self, **kw):
        conditions = LifecycleRuleConditions(**kw)
        rule = {
            "action": {"type": "AbortIncompleteMultipartUpload"},
            "condition": dict(conditions),
        }
        super().__init__(rule)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleAbortIncompleteMultipartUpload`
        :returns: Instance created from resource.
        """
        instance = cls(_factory=True)
        instance.update(resource)
        return instance


_default = object()


class IAMConfiguration(dict):
    """Map a bucket's IAM configuration.

    :type bucket: :class:`Bucket`
    :param bucket: Bucket for which this instance is the policy.

    :type public_access_prevention: str
    :param public_access_prevention:
        (Optional) Whether the public access prevention policy is 'inherited' (default) or 'enforced'.
        See: https://cloud.google.com/storage/docs/public-access-prevention

    :type uniform_bucket_level_access_enabled: bool
    :param uniform_bucket_level_access_enabled:
        (Optional) Whether the IAM-only policy is enabled for the bucket.

    :type uniform_bucket_level_access_locked_time: :class:`datetime.datetime`
    :param uniform_bucket_level_access_locked_time:
        (Optional) When the bucket's IAM-only policy was enabled.
        This value should normally only be set by the back-end API.

    :type bucket_policy_only_enabled: bool
    :param bucket_policy_only_enabled:
        Deprecated alias for :data:`uniform_bucket_level_access_enabled`.

    :type bucket_policy_only_locked_time: :class:`datetime.datetime`
    :param bucket_policy_only_locked_time:
        Deprecated alias for :data:`uniform_bucket_level_access_locked_time`.
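
    Example, a minimal sketch; ``bucket`` is assumed to be an existing
    :class:`Bucket` instance:

    .. code-block:: python

        config = IAMConfiguration(bucket, uniform_bucket_level_access_enabled=True)
        assert config.uniform_bucket_level_access_enabled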
    """

    def __init__(
        self,
        bucket,
        public_access_prevention=_default,
        uniform_bucket_level_access_enabled=_default,
        uniform_bucket_level_access_locked_time=_default,
        bucket_policy_only_enabled=_default,
        bucket_policy_only_locked_time=_default,
    ):
        if bucket_policy_only_enabled is not _default:
            if uniform_bucket_level_access_enabled is not _default:
                raise ValueError(_UBLA_BPO_ENABLED_MESSAGE)

            warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2)
            uniform_bucket_level_access_enabled = bucket_policy_only_enabled

        if bucket_policy_only_locked_time is not _default:
            if uniform_bucket_level_access_locked_time is not _default:
                raise ValueError(_UBLA_BPO_LOCK_TIME_MESSAGE)

            warnings.warn(_BPO_LOCK_TIME_MESSAGE, DeprecationWarning, stacklevel=2)
            uniform_bucket_level_access_locked_time = bucket_policy_only_locked_time

        if uniform_bucket_level_access_enabled is _default:
            uniform_bucket_level_access_enabled = False

        if public_access_prevention is _default:
            public_access_prevention = PUBLIC_ACCESS_PREVENTION_INHERITED

        data = {
            "uniformBucketLevelAccess": {
                "enabled": uniform_bucket_level_access_enabled
            },
            "publicAccessPrevention": public_access_prevention,
        }
        if uniform_bucket_level_access_locked_time is not _default:
            data["uniformBucketLevelAccess"]["lockedTime"] = _datetime_to_rfc3339(
                uniform_bucket_level_access_locked_time
            )
        super(IAMConfiguration, self).__init__(data)
        self._bucket = bucket

    @classmethod
    def from_api_repr(cls, resource, bucket):
        """Factory: construct instance from resource.

        :type bucket: :class:`Bucket`
        :param bucket: Bucket for which this instance is the policy.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`IAMConfiguration`
        :returns: Instance created from resource.
        """
        instance = cls(bucket)
        instance.update(resource)
        return instance

    @property
    def bucket(self):
        """Bucket for which this instance is the policy.

        :rtype: :class:`Bucket`
        :returns: the instance's bucket.
        """
        return self._bucket

    @property
    def public_access_prevention(self):
        """Setting for public access prevention policy. Options are 'inherited' (default) or 'enforced'.

        See: https://cloud.google.com/storage/docs/public-access-prevention

        :rtype: string
        :returns: the public access prevention status, either 'enforced' or 'inherited'.
        """
        return self["publicAccessPrevention"]

    @public_access_prevention.setter
    def public_access_prevention(self, value):
        self["publicAccessPrevention"] = value
        self.bucket._patch_property("iamConfiguration", self)

    @property
    def uniform_bucket_level_access_enabled(self):
        """If set, access checks only use bucket-level IAM policies or above.

        :rtype: bool
        :returns: whether the bucket is configured to allow only IAM.
        """
        ubla = self.get("uniformBucketLevelAccess", {})
        return ubla.get("enabled", False)

    @uniform_bucket_level_access_enabled.setter
    def uniform_bucket_level_access_enabled(self, value):
        ubla = self.setdefault("uniformBucketLevelAccess", {})
        ubla["enabled"] = bool(value)
        self.bucket._patch_property("iamConfiguration", self)

    @property
    def uniform_bucket_level_access_locked_time(self):
        """Deadline for changing :attr:`uniform_bucket_level_access_enabled` from true to false.

        If the bucket's :attr:`uniform_bucket_level_access_enabled` is true, this property
        is the time after which that setting becomes immutable.

        If the bucket's :attr:`uniform_bucket_level_access_enabled` is false, this property
        is ``None``.

        :rtype: Union[:class:`datetime.datetime`, None]
        :returns: (readonly) Time after which :attr:`uniform_bucket_level_access_enabled` will
                  be frozen as true.
        """
        ubla = self.get("uniformBucketLevelAccess", {})
        stamp = ubla.get("lockedTime")
        if stamp is not None:
            stamp = _rfc3339_nanos_to_datetime(stamp)
        return stamp

    @property
    def bucket_policy_only_enabled(self):
        """Deprecated alias for :attr:`uniform_bucket_level_access_enabled`.

        :rtype: bool
        :returns: whether the bucket is configured to allow only IAM.
        """
        return self.uniform_bucket_level_access_enabled

    @bucket_policy_only_enabled.setter
    def bucket_policy_only_enabled(self, value):
        warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2)
        self.uniform_bucket_level_access_enabled = value

    @property
    def bucket_policy_only_locked_time(self):
        """Deprecated alias for :attr:`uniform_bucket_level_access_locked_time`.

        :rtype: Union[:class:`datetime.datetime`, None]
        :returns:
            (readonly) Time after which :attr:`bucket_policy_only_enabled` will
            be frozen as true.
        """
        return self.uniform_bucket_level_access_locked_time


class Bucket(_PropertyMixin):
    """A class representing a Bucket on Cloud Storage.

    :type client: :class:`google.cloud.storage.client.Client`
    :param client: A client which holds credentials and project configuration
                   for the bucket (which requires a project).

    :type name: str
    :param name: The name of the bucket. Bucket names must start and end with a
                 number or letter.

    :type user_project: str
    :param user_project: (Optional) the project ID to be billed for API
                         requests made via this instance.

    :type generation: int
    :param generation: (Optional) If present, selects a specific revision of
                       this bucket.
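
    Example, a minimal sketch; the bucket name is illustrative and the
    client must be constructed with valid credentials:

    .. code-block:: python

        from google.cloud import storage

        client = storage.Client()
        bucket = client.bucket("my-bucket")  # no HTTP request is made here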
    """

    _MAX_OBJECTS_FOR_ITERATION = 256
    """Maximum number of existing objects allowed in iteration.

    This is used in Bucket.delete() and Bucket.make_public().
    """

    STORAGE_CLASSES = (
        STANDARD_STORAGE_CLASS,
        NEARLINE_STORAGE_CLASS,
        COLDLINE_STORAGE_CLASS,
        ARCHIVE_STORAGE_CLASS,
        MULTI_REGIONAL_LEGACY_STORAGE_CLASS,  # legacy
        REGIONAL_LEGACY_STORAGE_CLASS,  # legacy
        DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS,  # legacy
    )
    """Allowed values for :attr:`storage_class`.

    Default value is :attr:`STANDARD_STORAGE_CLASS`.

    See
    https://cloud.google.com/storage/docs/json_api/v1/buckets#storageClass
    https://cloud.google.com/storage/docs/storage-classes
    """

    _LOCATION_TYPES = (
        MULTI_REGION_LOCATION_TYPE,
        REGION_LOCATION_TYPE,
        DUAL_REGION_LOCATION_TYPE,
    )
    """Allowed values for :attr:`location_type`."""

    def __init__(self, client, name=None, user_project=None, generation=None):
        """
        property :attr:`name`
            Get the bucket's name.
        """
        name = _validate_name(name)
        super(Bucket, self).__init__(name=name)
        self._client = client
        self._acl = BucketACL(self)
        self._default_object_acl = DefaultObjectACL(self)
        self._label_removals = set()
        self._user_project = user_project

        if generation is not None:
            self._properties["generation"] = generation

    def __repr__(self):
        return f"<Bucket: {self.name}>"

    @property
    def client(self):
        """The client bound to this bucket."""
        return self._client

    def _set_properties(self, value):
        """Set the properties for the current object.

        :type value: dict or :class:`google.cloud.storage.batch._FutureDict`
        :param value: The properties to be set.
        """
        self._label_removals.clear()
        return super(Bucket, self)._set_properties(value)

    @property
    def rpo(self):
        """Get the RPO (Recovery Point Objective) of this bucket.

        See: https://cloud.google.com/storage/docs/managing-turbo-replication

        :rtype: str
        :returns: "ASYNC_TURBO" or "DEFAULT"
        """
        return self._properties.get("rpo")

    @rpo.setter
    def rpo(self, value):
        """
        Set the RPO (Recovery Point Objective) of this bucket.

        See: https://cloud.google.com/storage/docs/managing-turbo-replication

        :type value: str
        :param value: "ASYNC_TURBO" or "DEFAULT"
        """
        self._patch_property("rpo", value)

    @property
    def user_project(self):
        """Project ID to be billed for API requests made via this bucket.

        If unset, API requests are billed to the bucket owner.

        A user project is required for all operations on Requester Pays buckets.

        See https://cloud.google.com/storage/docs/requester-pays#requirements for details.

        :rtype: str
        """
        return self._user_project

    @property
    def generation(self):
        """Retrieve the generation for the bucket.

        :rtype: int or ``NoneType``
        :returns: The generation of the bucket or ``None`` if the bucket's
                  resource has not been loaded from the server.
        """
        generation = self._properties.get("generation")
        if generation is not None:
            return int(generation)

    @property
    def soft_delete_time(self):
        """If this bucket has been soft-deleted, returns the time at which it became soft-deleted.

        :rtype: :class:`datetime.datetime` or ``NoneType``
        :returns:
            (readonly) The time that the bucket became soft-deleted.
            Note this property is only set for soft-deleted buckets.
        """
        soft_delete_time = self._properties.get("softDeleteTime")
        if soft_delete_time is not None:
            return _rfc3339_nanos_to_datetime(soft_delete_time)

    @property
    def hard_delete_time(self):
        """If this bucket has been soft-deleted, returns the time at which it will be permanently deleted.

        :rtype: :class:`datetime.datetime` or ``NoneType``
        :returns:
            (readonly) The time that the bucket will be permanently deleted.
            Note this property is only set for soft-deleted buckets.
        """
        hard_delete_time = self._properties.get("hardDeleteTime")
        if hard_delete_time is not None:
            return _rfc3339_nanos_to_datetime(hard_delete_time)

    @property
    def _query_params(self):
        """Default query parameters."""
        params = super()._query_params
        return params

    @classmethod
    def from_uri(cls, uri, client=None):
        """Construct a bucket instance from a ``gs://`` URI.

        .. code-block:: python

            from google.cloud import storage
            from google.cloud.storage.bucket import Bucket
            client = storage.Client()
            bucket = Bucket.from_uri("gs://bucket", client=client)

        :type uri: str
        :param uri: The bucket URI to parse, e.g. ``gs://bucket``.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. Application code should
                       *always* pass ``client``.

        :rtype: :class:`google.cloud.storage.bucket.Bucket`
        :returns: The bucket object created.
        """
        scheme, netloc, path, query, frag = urlsplit(uri)

        if scheme != "gs":
            raise ValueError("URI scheme must be gs")

        return cls(client, name=netloc)

    @classmethod
    def from_string(cls, uri, client=None):
        """Construct a bucket instance from a ``gs://`` URI.

        .. note::
            Deprecated alias for :meth:`from_uri`.

        .. code-block:: python

            from google.cloud import storage
            from google.cloud.storage.bucket import Bucket
            client = storage.Client()
            bucket = Bucket.from_string("gs://bucket", client=client)

        :type uri: str
        :param uri: The bucket URI to parse, e.g. ``gs://bucket``.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. Application code should
                       *always* pass ``client``.

        :rtype: :class:`google.cloud.storage.bucket.Bucket`
        :returns: The bucket object created.
        """
        warnings.warn(_FROM_STRING_MESSAGE, PendingDeprecationWarning, stacklevel=2)
        return Bucket.from_uri(uri=uri, client=client)

    def blob(
        self,
        blob_name,
        chunk_size=None,
        encryption_key=None,
        kms_key_name=None,
        generation=None,
    ):
        """Factory constructor for blob object.

        .. note::
            This will not make an HTTP request; it simply instantiates
            a blob object owned by this bucket.

        :type blob_name: str
        :param blob_name: The name of the blob to be instantiated.

        :type chunk_size: int
        :param chunk_size: The size of a chunk of data whenever iterating
                           (in bytes). This must be a multiple of 256 KB per
                           the API specification.

        :type encryption_key: bytes
        :param encryption_key:
            (Optional) 32 byte encryption key for customer-supplied encryption.

        :type kms_key_name: str
        :param kms_key_name:
            (Optional) Resource name of KMS key used to encrypt blob's content.

        :type generation: long
        :param generation: (Optional) If present, selects a specific revision of
                           this object.

        :rtype: :class:`google.cloud.storage.blob.Blob`
        :returns: The blob object created.
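
        Example, a minimal sketch; the object name is illustrative:

        .. code-block:: python

            blob = bucket.blob("my-object.txt")
            # ``blob`` exists only locally until, e.g.,
            # ``blob.upload_from_string`` is called.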
        """
        return Blob(
            name=blob_name,
            bucket=self,
            chunk_size=chunk_size,
            encryption_key=encryption_key,
            kms_key_name=kms_key_name,
            generation=generation,
        )

    def notification(
        self,
        topic_name=None,
        topic_project=None,
        custom_attributes=None,
        event_types=None,
        blob_name_prefix=None,
        payload_format=NONE_PAYLOAD_FORMAT,
        notification_id=None,
    ):
        """Factory: create a notification resource for the bucket.

        See: :class:`.BucketNotification` for parameters.

        :rtype: :class:`.BucketNotification`
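
        Example, a minimal sketch; the topic name is illustrative, and the
        notification is not created server-side until its ``create()`` is
        called:

        .. code-block:: python

            notification = bucket.notification(topic_name="my-topic")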
        """
        return BucketNotification(
            self,
            topic_name=topic_name,
            topic_project=topic_project,
            custom_attributes=custom_attributes,
            event_types=event_types,
            blob_name_prefix=blob_name_prefix,
            payload_format=payload_format,
            notification_id=notification_id,
        )

    def exists(
        self,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_etag_match=None,
        if_etag_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY,
    ):
        """Determines whether or not this bucket exists.

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_etag_match: Union[str, Set[str]]
        :param if_etag_match: (Optional) Make the operation conditional on whether the
                              bucket's current ETag matches the given value.

        :type if_etag_not_match: Union[str, Set[str]]
        :param if_etag_not_match: (Optional) Make the operation conditional on whether the
                                  bucket's current ETag does not match the given value.

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :rtype: bool
        :returns: True if the bucket exists in Cloud Storage.
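
        Example, a minimal sketch; the bucket name is illustrative:

        .. code-block:: python

            from google.cloud import storage

            client = storage.Client()
            if client.bucket("my-bucket").exists():
                print("bucket is reachable")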
        """
        with create_trace_span(name="Storage.Bucket.exists"):
            client = self._require_client(client)
            # We only need the status code (200 or not) so we seek to
            # minimize the returned payload.
            query_params = {"fields": "name"}

            if self.user_project is not None:
                query_params["userProject"] = self.user_project

            _add_generation_match_parameters(
                query_params,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
            )

            headers = {}
            _add_etag_match_headers(
                headers,
                if_etag_match=if_etag_match,
                if_etag_not_match=if_etag_not_match,
            )

            try:
                # We intentionally pass `_target_object=None` since fields=name
                # would limit the local properties.
                client._get_resource(
                    self.path,
                    query_params=query_params,
                    headers=headers,
                    timeout=timeout,
                    retry=retry,
                    _target_object=None,
                )
            except NotFound:
                # NOTE: This will not fail immediately in a batch. However, when
                # Batch.finish() is called, the resulting `NotFound` will be
                # raised.
                return False
            return True

    def create(
        self,
        client=None,
        project=None,
        location=None,
        predefined_acl=None,
        predefined_default_object_acl=None,
        enable_object_retention=False,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
    ):
        """Creates current bucket.

        If the bucket already exists, will raise
        :class:`google.cloud.exceptions.Conflict`.

        This implements "storage.buckets.insert".

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type project: str
        :param project: (Optional) The project under which the bucket is to
                        be created. If not passed, uses the project set on
                        the client.
        :raises ValueError: if ``project`` is None and client's
                            :attr:`project` is also None.

        :type location: str
        :param location: (Optional) The location of the bucket. If not passed,
                         the default location, US, will be used. See
                         https://cloud.google.com/storage/docs/bucket-locations

        :type predefined_acl: str
        :param predefined_acl:
            (Optional) Name of predefined ACL to apply to bucket. See:
            https://cloud.google.com/storage/docs/access-control/lists#predefined-acl

        :type predefined_default_object_acl: str
        :param predefined_default_object_acl:
            (Optional) Name of predefined ACL to apply to bucket's objects. See:
            https://cloud.google.com/storage/docs/access-control/lists#predefined-acl

        :type enable_object_retention: bool
        :param enable_object_retention:
            (Optional) Whether object retention should be enabled on this bucket. See:
            https://cloud.google.com/storage/docs/object-lock

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
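
        Example, a minimal sketch; the bucket name and location are
        illustrative:

        .. code-block:: python

            from google.cloud import storage

            client = storage.Client()
            bucket = client.bucket("my-new-bucket")
            bucket.create(location="US-EAST1")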
        """
        with create_trace_span(name="Storage.Bucket.create"):
            client = self._require_client(client)
            client.create_bucket(
                bucket_or_name=self,
                project=project,
                user_project=self.user_project,
                location=location,
                predefined_acl=predefined_acl,
                predefined_default_object_acl=predefined_default_object_acl,
                enable_object_retention=enable_object_retention,
                timeout=timeout,
                retry=retry,
            )

    def update(
        self,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
    ):
        """Sends all properties in a PUT request.

        Updates the ``_properties`` with the response from the backend.

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current object.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
        """
        with create_trace_span(name="Storage.Bucket.update"):
            super(Bucket, self).update(
                client=client,
                timeout=timeout,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                retry=retry,
            )

    def reload(
        self,
        client=None,
        projection="noAcl",
        timeout=_DEFAULT_TIMEOUT,
        if_etag_match=None,
        if_etag_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY,
        soft_deleted=None,
    ):
        """Reload properties from Cloud Storage.

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current object.

        :type projection: str
        :param projection: (Optional) If used, must be 'full' or 'noAcl'.
                           Defaults to ``'noAcl'``. Specifies the set of
                           properties to return.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_etag_match: Union[str, Set[str]]
        :param if_etag_match: (Optional) Make the operation conditional on whether the
                              bucket's current ETag matches the given value.

        :type if_etag_not_match: Union[str, Set[str]]
        :param if_etag_not_match: (Optional) Make the operation conditional on whether the
                                  bucket's current ETag does not match the given value.

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :type soft_deleted: bool
        :param soft_deleted: (Optional) If True, looks for a soft-deleted
            bucket. Will only return the bucket metadata if the bucket exists
            and is in a soft-deleted state. The bucket ``generation`` must be
            set if ``soft_deleted`` is set to True.
            See: https://cloud.google.com/storage/docs/soft-delete
        """
        with create_trace_span(name="Storage.Bucket.reload"):
            super(Bucket, self).reload(
                client=client,
                projection=projection,
                timeout=timeout,
                if_etag_match=if_etag_match,
                if_etag_not_match=if_etag_not_match,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                retry=retry,
                soft_deleted=soft_deleted,
            )

    def patch(
        self,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
    ):
        """Sends all changed properties in a PATCH request.

        Updates the ``_properties`` with the response from the backend.

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current object.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
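
        Example, a minimal sketch; ``bucket`` is assumed to be an existing
        :class:`Bucket` and the label values are illustrative:

        .. code-block:: python

            bucket.labels = {"env": "dev"}
            bucket.patch()  # sends only the changed ``labels`` field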
        """
        with create_trace_span(name="Storage.Bucket.patch"):
            # Special case: For buckets, it is possible that labels are being
            # removed; this requires special handling.
            if self._label_removals:
                self._changes.add("labels")
                self._properties.setdefault("labels", {})
                for removed_label in self._label_removals:
                    self._properties["labels"][removed_label] = None

            # Call the superclass method.
            super(Bucket, self).patch(
                client=client,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                timeout=timeout,
                retry=retry,
            )

    @property
    def acl(self):
        """Create our ACL on demand."""
        return self._acl

    @property
    def default_object_acl(self):
        """Create our defaultObjectACL on demand."""
        return self._default_object_acl

    @staticmethod
    def path_helper(bucket_name):
        """Relative URL path for a bucket.

        :type bucket_name: str
        :param bucket_name: The bucket name in the path.

        :rtype: str
        :returns: The relative URL path for ``bucket_name``.
        """
        return "/b/" + bucket_name

    @property
    def path(self):
        """The URL path to this bucket."""
        if not self.name:
            raise ValueError("Cannot determine path without bucket name.")

        return self.path_helper(self.name)

    def get_blob(
        self,
        blob_name,
        client=None,
        encryption_key=None,
        generation=None,
        if_etag_match=None,
        if_etag_not_match=None,
        if_generation_match=None,
        if_generation_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
        soft_deleted=None,
        **kwargs,
    ):
        """Get a blob object by name.

        See a [code sample](https://cloud.google.com/storage/docs/samples/storage-get-metadata#storage_get_metadata-python)
        on how to retrieve metadata of an object.

        If :attr:`user_project` is set, bills the API request to that project.

        :type blob_name: str
        :param blob_name: The name of the blob to retrieve.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type encryption_key: bytes
        :param encryption_key:
            (Optional) 32 byte encryption key for customer-supplied encryption.
            See
            https://cloud.google.com/storage/docs/encryption#customer-supplied.

        :type generation: long
        :param generation:
            (Optional) If present, selects a specific revision of this object.

        :type if_etag_match: Union[str, Set[str]]
        :param if_etag_match:
            (Optional) See :ref:`using-if-etag-match`

        :type if_etag_not_match: Union[str, Set[str]]
        :param if_etag_not_match:
            (Optional) See :ref:`using-if-etag-not-match`

        :type if_generation_match: long
        :param if_generation_match:
            (Optional) See :ref:`using-if-generation-match`

        :type if_generation_not_match: long
        :param if_generation_not_match:
            (Optional) See :ref:`using-if-generation-not-match`

        :type if_metageneration_match: long
        :param if_metageneration_match:
            (Optional) See :ref:`using-if-metageneration-match`

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match:
            (Optional) See :ref:`using-if-metageneration-not-match`

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :type soft_deleted: bool
        :param soft_deleted:
            (Optional) If True, looks for a soft-deleted object. Will only return
            the object metadata if the object exists and is in a soft-deleted state.
            Object ``generation`` is required if ``soft_deleted`` is set to True.
            See: https://cloud.google.com/storage/docs/soft-delete

        :param kwargs: Keyword arguments to pass to the
                       :class:`~google.cloud.storage.blob.Blob` constructor.

        :rtype: :class:`google.cloud.storage.blob.Blob` or None
        :returns: The blob object if it exists, otherwise None.
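
        Example, a minimal sketch; the object name is illustrative:

        .. code-block:: python

            blob = bucket.get_blob("my-object.txt")
            if blob is not None:
                print(blob.size)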
        """
        with create_trace_span(name="Storage.Bucket.getBlob"):
            blob = Blob(
                bucket=self,
                name=blob_name,
                encryption_key=encryption_key,
                generation=generation,
                **kwargs,
            )
            try:
                # NOTE: This will not fail immediately in a batch. However, when
                # Batch.finish() is called, the resulting `NotFound` will be
                # raised.
                blob.reload(
                    client=client,
                    timeout=timeout,
                    if_etag_match=if_etag_match,
                    if_etag_not_match=if_etag_not_match,
                    if_generation_match=if_generation_match,
                    if_generation_not_match=if_generation_not_match,
                    if_metageneration_match=if_metageneration_match,
                    if_metageneration_not_match=if_metageneration_not_match,
                    retry=retry,
                    soft_deleted=soft_deleted,
                )
            except NotFound:
                return None
            else:
                return blob

    def list_blobs(
        self,
        max_results=None,
        page_token=None,
        prefix=None,
        delimiter=None,
        start_offset=None,
        end_offset=None,
        include_trailing_delimiter=None,
        versions=None,
        projection="noAcl",
        fields=None,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
        match_glob=None,
        include_folders_as_prefixes=None,
        soft_deleted=None,
        page_size=None,
    ):
        """Return an iterator used to find blobs in the bucket.

        If :attr:`user_project` is set, bills the API request to that project.

        :type max_results: int
        :param max_results:
            (Optional) The maximum number of blobs to return.

        :type page_token: str
        :param page_token:
            (Optional) If present, return the next batch of blobs, using the
            value, which must correspond to the ``nextPageToken`` value
            returned in the previous response. Deprecated: use the ``pages``
            property of the returned iterator instead of manually passing the
            token.

        :type prefix: str
        :param prefix: (Optional) Prefix used to filter blobs.

        :type delimiter: str
        :param delimiter: (Optional) Delimiter, used with ``prefix`` to
                          emulate hierarchy.

        :type start_offset: str
        :param start_offset:
            (Optional) Filter results to objects whose names are
            lexicographically equal to or after ``startOffset``. If
            ``endOffset`` is also set, the objects listed will have names
            between ``startOffset`` (inclusive) and ``endOffset`` (exclusive).

        :type end_offset: str
        :param end_offset:
            (Optional) Filter results to objects whose names are
            lexicographically before ``endOffset``. If ``startOffset`` is also
            set, the objects listed will have names between ``startOffset``
            (inclusive) and ``endOffset`` (exclusive).

        :type include_trailing_delimiter: boolean
        :param include_trailing_delimiter:
            (Optional) If true, objects that end in exactly one instance of
            ``delimiter`` will have their metadata included in ``items`` in
            addition to ``prefixes``.

        :type versions: bool
        :param versions: (Optional) Whether object versions should be returned
                         as separate blobs.

        :type projection: str
        :param projection: (Optional) If used, must be 'full' or 'noAcl'.
                           Defaults to ``'noAcl'``. Specifies the set of
                           properties to return.

        :type fields: str
        :param fields:
            (Optional) Selector specifying which fields to include
            in a partial response. Must be a list of fields. For
            example to get a partial response with just the next
            page token and the name and language of each blob returned:
            ``'items(name,contentLanguage),nextPageToken'``.
            See: https://cloud.google.com/storage/docs/json_api/v1/parameters#fields

        :type client: :class:`~google.cloud.storage.client.Client`
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :type match_glob: str
        :param match_glob:
            (Optional) A glob pattern used to filter results (for example, foo*bar).
            The string value must be UTF-8 encoded. See:
            https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-object-glob

        :type include_folders_as_prefixes: bool
        :param include_folders_as_prefixes:
            (Optional) If true, includes Folders and Managed Folders in the set of
            ``prefixes`` returned by the query. Only applicable if ``delimiter`` is set to /.
            See: https://cloud.google.com/storage/docs/managed-folders

        :type soft_deleted: bool
        :param soft_deleted:
            (Optional) If true, only soft-deleted objects will be listed as distinct results in order of increasing
            generation number. This parameter can only be used successfully if the bucket has a soft delete policy.
            Note ``soft_deleted`` and ``versions`` cannot be set to True simultaneously. See:
            https://cloud.google.com/storage/docs/soft-delete

        :type page_size: int
        :param page_size:
            (Optional) Maximum number of blobs to return in each page.
            Defaults to a value set by the API.

        :rtype: :class:`~google.api_core.page_iterator.Iterator`
        :returns: Iterator of all :class:`~google.cloud.storage.blob.Blob`
                  in this bucket matching the arguments.
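
        Example, a minimal sketch; the prefix is illustrative:

        .. code-block:: python

            for blob in bucket.list_blobs(prefix="logs/", max_results=10):
                print(blob.name)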
        """
        with create_trace_span(name="Storage.Bucket.listBlobs"):
            client = self._require_client(client)
            return client.list_blobs(
                self,
                max_results=max_results,
                page_token=page_token,
                prefix=prefix,
                delimiter=delimiter,
                start_offset=start_offset,
                end_offset=end_offset,
                include_trailing_delimiter=include_trailing_delimiter,
                versions=versions,
                projection=projection,
                fields=fields,
                page_size=page_size,
                timeout=timeout,
                retry=retry,
                match_glob=match_glob,
                include_folders_as_prefixes=include_folders_as_prefixes,
                soft_deleted=soft_deleted,
            )

    def list_notifications(
        self, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY
    ):
        """List Pub / Sub notifications for this bucket.

        See:
        https://cloud.google.com/storage/docs/json_api/v1/notifications/list

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :rtype: list of :class:`.BucketNotification`
        :returns: notification instances
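
        Example, a minimal sketch:

        .. code-block:: python

            for notification in bucket.list_notifications():
                print(notification.notification_id)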
        """
        with create_trace_span(name="Storage.Bucket.listNotifications"):
            client = self._require_client(client)
            path = self.path + "/notificationConfigs"
            iterator = client._list_resource(
                path,
                _item_to_notification,
                timeout=timeout,
                retry=retry,
            )
            iterator.bucket = self
            return iterator

    def get_notification(
        self,
        notification_id,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
    ):
        """Get Pub / Sub notification for this bucket.

        See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/notifications/get)
        and a [code sample](https://cloud.google.com/storage/docs/samples/storage-print-pubsub-bucket-notification#storage_print_pubsub_bucket_notification-python).

        If :attr:`user_project` is set, bills the API request to that project.

        :type notification_id: str
        :param notification_id: The notification id to retrieve the notification configuration.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :rtype: :class:`.BucketNotification`
        :returns: notification instance.
        """
        with create_trace_span(name="Storage.Bucket.getNotification"):
            notification = self.notification(notification_id=notification_id)
            notification.reload(client=client, timeout=timeout, retry=retry)
            return notification

    def delete(
        self,
        force=False,
        client=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
    ):
        """Delete this bucket.

        The bucket **must** be empty in order to submit a delete request. If
        ``force=True`` is passed, this will first attempt to delete all the
        objects / blobs in the bucket (i.e. try to empty the bucket).

        If the bucket doesn't exist, this will raise
        :class:`google.cloud.exceptions.NotFound`. If the bucket is not empty
        (and ``force=False``), will raise :class:`google.cloud.exceptions.Conflict`.

        If ``force=True`` and the bucket contains more than 256 objects / blobs
        this will cowardly refuse to delete the objects (or the bucket). This
        is to prevent accidental bucket deletion and to prevent extremely long
        runtime of this method. Also note that ``force=True`` is not supported
        in a ``Batch`` context.

        If :attr:`user_project` is set, bills the API request to that project.

        :type force: bool
        :param force: If True, empties the bucket's objects then deletes it.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.
1666
1667 :type timeout: float or tuple
1668 :param timeout:
1669 (Optional) The amount of time, in seconds, to wait
1670 for the server response. See: :ref:`configuring_timeouts`
1671
1672 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1673 :param retry:
1674 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1675
1676 :raises: :class:`ValueError` if ``force`` is ``True`` and the bucket
1677 contains more than 256 objects / blobs.
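
        Example (a minimal sketch; the bucket name is hypothetical, and
        ``force=True`` first empties the bucket, subject to the 256-object
        limit described above):

        >>> from google.cloud import storage
        >>> client = storage.Client()
        >>> bucket = client.get_bucket("my-bucket")
        >>> bucket.delete(force=True)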
1678 """
1679 with create_trace_span(name="Storage.Bucket.delete"):
1680 client = self._require_client(client)
1681 query_params = {}
1682
1683 if self.user_project is not None:
1684 query_params["userProject"] = self.user_project
1685
1686 _add_generation_match_parameters(
1687 query_params,
1688 if_metageneration_match=if_metageneration_match,
1689 if_metageneration_not_match=if_metageneration_not_match,
1690 )
1691 if force:
1692 blobs = list(
1693 self.list_blobs(
1694 max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
1695 client=client,
1696 timeout=timeout,
1697 retry=retry,
1698 versions=True,
1699 )
1700 )
1701 if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
1702 message = (
1703 "Refusing to delete bucket with more than "
1704 "%d objects. If you actually want to delete "
1705 "this bucket, please delete the objects "
1706 "yourself before calling Bucket.delete()."
1707 ) % (self._MAX_OBJECTS_FOR_ITERATION,)
1708 raise ValueError(message)
1709
1710 # Ignore 404 errors on delete.
1711 self.delete_blobs(
1712 blobs,
1713 on_error=lambda blob: None,
1714 client=client,
1715 timeout=timeout,
1716 retry=retry,
1717 preserve_generation=True,
1718 )
1719
1720 # We intentionally pass `_target_object=None` since a DELETE
1721 # request has no response value (whether in a standard request or
1722 # in a batch request).
1723 client._delete_resource(
1724 self.path,
1725 query_params=query_params,
1726 timeout=timeout,
1727 retry=retry,
1728 _target_object=None,
1729 )
1730
1731 def delete_blob(
1732 self,
1733 blob_name,
1734 client=None,
1735 generation=None,
1736 if_generation_match=None,
1737 if_generation_not_match=None,
1738 if_metageneration_match=None,
1739 if_metageneration_not_match=None,
1740 timeout=_DEFAULT_TIMEOUT,
1741 retry=DEFAULT_RETRY,
1742 ):
1743 """Deletes a blob from the current bucket.
1744
1745 If :attr:`user_project` is set, bills the API request to that project.
1746
1747 :type blob_name: str
1748 :param blob_name: A blob name to delete.
1749
1750 :type client: :class:`~google.cloud.storage.client.Client` or
1751 ``NoneType``
1752 :param client: (Optional) The client to use. If not passed, falls back
1753 to the ``client`` stored on the current bucket.
1754
1755 :type generation: long
1756 :param generation: (Optional) If present, permanently deletes a specific
1757 revision of this object.
1758
1759 :type if_generation_match: long
1760 :param if_generation_match:
1761 (Optional) See :ref:`using-if-generation-match`
1762
1763 :type if_generation_not_match: long
1764 :param if_generation_not_match:
1765 (Optional) See :ref:`using-if-generation-not-match`
1766
1767 :type if_metageneration_match: long
1768 :param if_metageneration_match:
1769 (Optional) See :ref:`using-if-metageneration-match`
1770
1771 :type if_metageneration_not_match: long
1772 :param if_metageneration_not_match:
1773 (Optional) See :ref:`using-if-metageneration-not-match`
1774
1775 :type timeout: float or tuple
1776 :param timeout:
1777 (Optional) The amount of time, in seconds, to wait
1778 for the server response. See: :ref:`configuring_timeouts`
1779
1780 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1781 :param retry: (Optional) How to retry the RPC. A None value will disable
1782 retries. A google.api_core.retry.Retry value will enable retries,
1783 and the object will define retriable response codes and errors and
1784 configure backoff and timeout options.
1785
1786 A google.cloud.storage.retry.ConditionalRetryPolicy value wraps a
1787 Retry object and activates it only if certain conditions are met.
1788 This class exists to provide safe defaults for RPC calls that are
1789 not technically safe to retry normally (due to potential data
1790 duplication or other side-effects) but become safe to retry if a
1791 condition such as if_generation_match is set.
1792
1793 See the retry.py source code and docstrings in this package
1794 (google.cloud.storage.retry) for information on retry types and how
1795 to configure them.
1796
        :raises: :class:`google.cloud.exceptions.NotFound` if the blob
                 isn't found. To suppress the exception, use
                 :meth:`delete_blobs` by passing a no-op ``on_error``
                 callback.
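
        Example (a minimal sketch; the blob name is hypothetical, and the
        ``NotFound`` is suppressed by hand):

        >>> from google.cloud import storage
        >>> from google.cloud.exceptions import NotFound
        >>> client = storage.Client()
        >>> bucket = client.get_bucket("my-bucket")
        >>> try:
        ...     bucket.delete_blob("doomed.txt")
        ... except NotFound:
        ...     pass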
1801 """
1802 with create_trace_span(name="Storage.Bucket.deleteBlob"):
1803 client = self._require_client(client)
1804 blob = Blob(blob_name, bucket=self, generation=generation)
1805
1806 query_params = copy.deepcopy(blob._query_params)
1807 _add_generation_match_parameters(
1808 query_params,
1809 if_generation_match=if_generation_match,
1810 if_generation_not_match=if_generation_not_match,
1811 if_metageneration_match=if_metageneration_match,
1812 if_metageneration_not_match=if_metageneration_not_match,
1813 )
1814 # We intentionally pass `_target_object=None` since a DELETE
1815 # request has no response value (whether in a standard request or
1816 # in a batch request).
1817 client._delete_resource(
1818 blob.path,
1819 query_params=query_params,
1820 timeout=timeout,
1821 retry=retry,
1822 _target_object=None,
1823 )
1824
1825 def delete_blobs(
1826 self,
1827 blobs,
1828 on_error=None,
1829 client=None,
1830 preserve_generation=False,
1831 timeout=_DEFAULT_TIMEOUT,
1832 if_generation_match=None,
1833 if_generation_not_match=None,
1834 if_metageneration_match=None,
1835 if_metageneration_not_match=None,
1836 retry=DEFAULT_RETRY,
1837 ):
1838 """Deletes a list of blobs from the current bucket.
1839
1840 Uses :meth:`delete_blob` to delete each individual blob.
1841
1842 By default, any generation information in the list of blobs is ignored, and the
        live versions of all blobs are deleted. Set ``preserve_generation`` to True
1844 if blob generation should instead be propagated from the list of blobs.
1845
1846 If :attr:`user_project` is set, bills the API request to that project.
1847
1848 :type blobs: list
1849 :param blobs: A list of :class:`~google.cloud.storage.blob.Blob`-s or
1850 blob names to delete.
1851
1852 :type on_error: callable
1853 :param on_error: (Optional) Takes single argument: ``blob``.
1854 Called once for each blob raising
1855 :class:`~google.cloud.exceptions.NotFound`;
1856 otherwise, the exception is propagated.
1857 Note that ``on_error`` is not supported in a ``Batch`` context.
1858
1859 :type client: :class:`~google.cloud.storage.client.Client`
1860 :param client: (Optional) The client to use. If not passed, falls back
1861 to the ``client`` stored on the current bucket.
1862
1863 :type preserve_generation: bool
        :param preserve_generation: (Optional) Deletes only the generation specified on the blob object,
                                    instead of the live version, if set to True. Only
                                    :class:`~google.cloud.storage.blob.Blob` objects can have their
                                    generation set in this way.
                                    Default: False.
1868
1869 :type if_generation_match: list of long
1870 :param if_generation_match:
1871 (Optional) See :ref:`using-if-generation-match`
            The list must match ``blobs`` item-to-item.
1874
1875 :type if_generation_not_match: list of long
1876 :param if_generation_not_match:
1877 (Optional) See :ref:`using-if-generation-not-match`
1878 The list must match ``blobs`` item-to-item.
1879
1880 :type if_metageneration_match: list of long
1881 :param if_metageneration_match:
1882 (Optional) See :ref:`using-if-metageneration-match`
1883 The list must match ``blobs`` item-to-item.
1884
1885 :type if_metageneration_not_match: list of long
1886 :param if_metageneration_not_match:
1887 (Optional) See :ref:`using-if-metageneration-not-match`
1888 The list must match ``blobs`` item-to-item.
1889
1890 :type timeout: float or tuple
1891 :param timeout:
1892 (Optional) The amount of time, in seconds, to wait
1893 for the server response. See: :ref:`configuring_timeouts`
1894
1895 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1896 :param retry: (Optional) How to retry the RPC. A None value will disable
1897 retries. A google.api_core.retry.Retry value will enable retries,
1898 and the object will define retriable response codes and errors and
1899 configure backoff and timeout options.
1900
1901 A google.cloud.storage.retry.ConditionalRetryPolicy value wraps a
1902 Retry object and activates it only if certain conditions are met.
1903 This class exists to provide safe defaults for RPC calls that are
1904 not technically safe to retry normally (due to potential data
1905 duplication or other side-effects) but become safe to retry if a
1906 condition such as if_generation_match is set.
1907
1908 See the retry.py source code and docstrings in this package
1909 (google.cloud.storage.retry) for information on retry types and how
1910 to configure them.
1911
1912 :raises: :class:`~google.cloud.exceptions.NotFound` (if
1913 `on_error` is not passed).
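
        Example (a minimal sketch; names are hypothetical, and the no-op
        ``on_error`` callback swallows ``NotFound`` for missing blobs):

        >>> from google.cloud import storage
        >>> client = storage.Client()
        >>> bucket = client.get_bucket("my-bucket")
        >>> bucket.delete_blobs(["a.txt", "b.txt"], on_error=lambda blob: None)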
1914 """
1915 with create_trace_span(name="Storage.Bucket.deleteBlobs"):
1916 _raise_if_len_differs(
1917 len(blobs),
1918 if_generation_match=if_generation_match,
1919 if_generation_not_match=if_generation_not_match,
1920 if_metageneration_match=if_metageneration_match,
1921 if_metageneration_not_match=if_metageneration_not_match,
1922 )
1923 if_generation_match = iter(if_generation_match or [])
1924 if_generation_not_match = iter(if_generation_not_match or [])
1925 if_metageneration_match = iter(if_metageneration_match or [])
1926 if_metageneration_not_match = iter(if_metageneration_not_match or [])
1927
1928 for blob in blobs:
1929 try:
1930 blob_name = blob
1931 generation = None
1932 if not isinstance(blob_name, str):
1933 blob_name = blob.name
1934 generation = blob.generation if preserve_generation else None
1935
1936 self.delete_blob(
1937 blob_name,
1938 client=client,
1939 generation=generation,
1940 if_generation_match=next(if_generation_match, None),
1941 if_generation_not_match=next(if_generation_not_match, None),
1942 if_metageneration_match=next(if_metageneration_match, None),
1943 if_metageneration_not_match=next(
1944 if_metageneration_not_match, None
1945 ),
1946 timeout=timeout,
1947 retry=retry,
1948 )
1949 except NotFound:
1950 if on_error is not None:
1951 on_error(blob)
1952 else:
1953 raise
1954
1955 def copy_blob(
1956 self,
1957 blob,
1958 destination_bucket,
1959 new_name=None,
1960 client=None,
1961 preserve_acl=True,
1962 source_generation=None,
1963 if_generation_match=None,
1964 if_generation_not_match=None,
1965 if_metageneration_match=None,
1966 if_metageneration_not_match=None,
1967 if_source_generation_match=None,
1968 if_source_generation_not_match=None,
1969 if_source_metageneration_match=None,
1970 if_source_metageneration_not_match=None,
1971 timeout=_DEFAULT_TIMEOUT,
1972 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
1973 ):
1974 """Copy the given blob to the given bucket, optionally with a new name.
1975
1976 If :attr:`user_project` is set, bills the API request to that project.
1977
1978 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/objects/copy)
1979 and a [code sample](https://cloud.google.com/storage/docs/samples/storage-copy-file#storage_copy_file-python).
1980
1981 :type blob: :class:`google.cloud.storage.blob.Blob`
1982 :param blob: The blob to be copied.
1983
1984 :type destination_bucket: :class:`google.cloud.storage.bucket.Bucket`
1985 :param destination_bucket: The bucket into which the blob should be
1986 copied.
1987
1988 :type new_name: str
1989 :param new_name: (Optional) The new name for the copied file.
1990
1991 :type client: :class:`~google.cloud.storage.client.Client` or
1992 ``NoneType``
1993 :param client: (Optional) The client to use. If not passed, falls back
1994 to the ``client`` stored on the current bucket.
1995
1996 :type preserve_acl: bool
1997 :param preserve_acl: DEPRECATED. This argument is not functional!
1998 (Optional) Copies ACL from old blob to new blob.
1999 Default: True.
2000 Note that ``preserve_acl`` is not supported in a
2001 ``Batch`` context.
2002
2003 :type source_generation: long
2004 :param source_generation: (Optional) The generation of the blob to be
2005 copied.
2006
2007 :type if_generation_match: long
2008 :param if_generation_match:
2009 (Optional) See :ref:`using-if-generation-match`
2010 Note that the generation to be matched is that of the
2011 ``destination`` blob.
2012
2013 :type if_generation_not_match: long
2014 :param if_generation_not_match:
2015 (Optional) See :ref:`using-if-generation-not-match`
2016 Note that the generation to be matched is that of the
2017 ``destination`` blob.
2018
2019 :type if_metageneration_match: long
2020 :param if_metageneration_match:
2021 (Optional) See :ref:`using-if-metageneration-match`
2022 Note that the metageneration to be matched is that of the
2023 ``destination`` blob.
2024
2025 :type if_metageneration_not_match: long
2026 :param if_metageneration_not_match:
2027 (Optional) See :ref:`using-if-metageneration-not-match`
2028 Note that the metageneration to be matched is that of the
2029 ``destination`` blob.
2030
2031 :type if_source_generation_match: long
2032 :param if_source_generation_match:
2033 (Optional) Makes the operation conditional on whether the source
2034 object's generation matches the given value.
2035
2036 :type if_source_generation_not_match: long
2037 :param if_source_generation_not_match:
2038 (Optional) Makes the operation conditional on whether the source
2039 object's generation does not match the given value.
2040
2041 :type if_source_metageneration_match: long
2042 :param if_source_metageneration_match:
2043 (Optional) Makes the operation conditional on whether the source
2044 object's current metageneration matches the given value.
2045
2046 :type if_source_metageneration_not_match: long
2047 :param if_source_metageneration_not_match:
2048 (Optional) Makes the operation conditional on whether the source
2049 object's current metageneration does not match the given value.
2050
2051 :type timeout: float or tuple
2052 :param timeout:
2053 (Optional) The amount of time, in seconds, to wait
2054 for the server response. See: :ref:`configuring_timeouts`
2055
2056 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
2057 :param retry:
2058 (Optional) How to retry the RPC.
2059 The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, a conditional retry
2060 policy which will only enable retries if ``if_generation_match`` or ``generation``
2061 is set, in order to ensure requests are idempotent before retrying them.
2062 Change the value to ``DEFAULT_RETRY`` or another `google.api_core.retry.Retry` object
2063 to enable retries regardless of generation precondition setting.
2064 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout).
2065
2066 :rtype: :class:`google.cloud.storage.blob.Blob`
2067 :returns: The new Blob.
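
        Example (a minimal sketch; the bucket and blob names are
        hypothetical):

        >>> from google.cloud import storage
        >>> client = storage.Client()
        >>> source_bucket = client.get_bucket("source-bucket")
        >>> blob = source_bucket.blob("file.txt")
        >>> destination_bucket = client.get_bucket("destination-bucket")
        >>> new_blob = source_bucket.copy_blob(
        ...     blob, destination_bucket, new_name="copied.txt"
        ... )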
2068 """
2069 with create_trace_span(name="Storage.Bucket.copyBlob"):
2070 client = self._require_client(client)
2071 query_params = {}
2072
2073 if self.user_project is not None:
2074 query_params["userProject"] = self.user_project
2075
2076 if source_generation is not None:
2077 query_params["sourceGeneration"] = source_generation
2078
2079 _add_generation_match_parameters(
2080 query_params,
2081 if_generation_match=if_generation_match,
2082 if_generation_not_match=if_generation_not_match,
2083 if_metageneration_match=if_metageneration_match,
2084 if_metageneration_not_match=if_metageneration_not_match,
2085 if_source_generation_match=if_source_generation_match,
2086 if_source_generation_not_match=if_source_generation_not_match,
2087 if_source_metageneration_match=if_source_metageneration_match,
2088 if_source_metageneration_not_match=if_source_metageneration_not_match,
2089 )
2090
2091 if new_name is None:
2092 new_name = blob.name
2093
2094 new_blob = Blob(bucket=destination_bucket, name=new_name)
2095 api_path = blob.path + "/copyTo" + new_blob.path
2096 copy_result = client._post_resource(
2097 api_path,
2098 None,
2099 query_params=query_params,
2100 timeout=timeout,
2101 retry=retry,
2102 _target_object=new_blob,
2103 )
2104
2105 if not preserve_acl:
2106 new_blob.acl.save(acl={}, client=client, timeout=timeout)
2107
2108 new_blob._set_properties(copy_result)
2109 return new_blob
2110
2111 def rename_blob(
2112 self,
2113 blob,
2114 new_name,
2115 client=None,
2116 if_generation_match=None,
2117 if_generation_not_match=None,
2118 if_metageneration_match=None,
2119 if_metageneration_not_match=None,
2120 if_source_generation_match=None,
2121 if_source_generation_not_match=None,
2122 if_source_metageneration_match=None,
2123 if_source_metageneration_not_match=None,
2124 timeout=_DEFAULT_TIMEOUT,
2125 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
2126 ):
2127 """Rename the given blob using copy and delete operations.
2128
2129 If :attr:`user_project` is set, bills the API request to that project.
2130
2131 Effectively, copies blob to the same bucket with a new name, then
2132 deletes the blob.
2133
2134 .. warning::
2135
            This method will first duplicate the data and then delete the
            old blob. This means that with very large objects renaming
            could be a (temporarily) very costly or very slow operation.
2139 If you need more control over the copy and deletion, instead
2140 use ``google.cloud.storage.blob.Blob.copy_to`` and
2141 ``google.cloud.storage.blob.Blob.delete`` directly.
2142
2143 Also note that this method is not fully supported in a
2144 ``Batch`` context.
2145
2146 :type blob: :class:`google.cloud.storage.blob.Blob`
2147 :param blob: The blob to be renamed.
2148
2149 :type new_name: str
2150 :param new_name: The new name for this blob.
2151
2152 :type client: :class:`~google.cloud.storage.client.Client` or
2153 ``NoneType``
2154 :param client: (Optional) The client to use. If not passed, falls back
2155 to the ``client`` stored on the current bucket.
2156
2157 :type if_generation_match: long
2158 :param if_generation_match:
2159 (Optional) See :ref:`using-if-generation-match`
2160 Note that the generation to be matched is that of the
2161 ``destination`` blob.
2162
2163 :type if_generation_not_match: long
2164 :param if_generation_not_match:
2165 (Optional) See :ref:`using-if-generation-not-match`
2166 Note that the generation to be matched is that of the
2167 ``destination`` blob.
2168
2169 :type if_metageneration_match: long
2170 :param if_metageneration_match:
2171 (Optional) See :ref:`using-if-metageneration-match`
2172 Note that the metageneration to be matched is that of the
2173 ``destination`` blob.
2174
2175 :type if_metageneration_not_match: long
2176 :param if_metageneration_not_match:
2177 (Optional) See :ref:`using-if-metageneration-not-match`
2178 Note that the metageneration to be matched is that of the
2179 ``destination`` blob.
2180
2181 :type if_source_generation_match: long
2182 :param if_source_generation_match:
2183 (Optional) Makes the operation conditional on whether the source
2184 object's generation matches the given value. Also used in the
2185 (implied) delete request.
2186
2187 :type if_source_generation_not_match: long
2188 :param if_source_generation_not_match:
2189 (Optional) Makes the operation conditional on whether the source
2190 object's generation does not match the given value. Also used in
2191 the (implied) delete request.
2192
2193 :type if_source_metageneration_match: long
2194 :param if_source_metageneration_match:
2195 (Optional) Makes the operation conditional on whether the source
2196 object's current metageneration matches the given value. Also used
2197 in the (implied) delete request.
2198
2199 :type if_source_metageneration_not_match: long
2200 :param if_source_metageneration_not_match:
2201 (Optional) Makes the operation conditional on whether the source
2202 object's current metageneration does not match the given value.
2203 Also used in the (implied) delete request.
2204
2205 :type timeout: float or tuple
2206 :param timeout:
2207 (Optional) The amount of time, in seconds, to wait
2208 for the server response. See: :ref:`configuring_timeouts`
2209
2210 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
2211 :param retry:
2212 (Optional) How to retry the RPC.
2213 The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, a conditional retry
2214 policy which will only enable retries if ``if_generation_match`` or ``generation``
2215 is set, in order to ensure requests are idempotent before retrying them.
2216 Change the value to ``DEFAULT_RETRY`` or another `google.api_core.retry.Retry` object
2217 to enable retries regardless of generation precondition setting.
2218 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout).
2219
2220 :rtype: :class:`Blob`
2221 :returns: The newly-renamed blob.
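
        Example (a minimal sketch; names are hypothetical):

        >>> from google.cloud import storage
        >>> client = storage.Client()
        >>> bucket = client.get_bucket("my-bucket")
        >>> blob = bucket.blob("old-name.txt")
        >>> new_blob = bucket.rename_blob(blob, "new-name.txt")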
2222 """
2223 with create_trace_span(name="Storage.Bucket.renameBlob"):
2224 same_name = blob.name == new_name
2225
2226 new_blob = self.copy_blob(
2227 blob,
2228 self,
2229 new_name,
2230 client=client,
2231 timeout=timeout,
2232 if_generation_match=if_generation_match,
2233 if_generation_not_match=if_generation_not_match,
2234 if_metageneration_match=if_metageneration_match,
2235 if_metageneration_not_match=if_metageneration_not_match,
2236 if_source_generation_match=if_source_generation_match,
2237 if_source_generation_not_match=if_source_generation_not_match,
2238 if_source_metageneration_match=if_source_metageneration_match,
2239 if_source_metageneration_not_match=if_source_metageneration_not_match,
2240 retry=retry,
2241 )
2242
2243 if not same_name:
2244 blob.delete(
2245 client=client,
2246 timeout=timeout,
2247 if_generation_match=if_source_generation_match,
2248 if_generation_not_match=if_source_generation_not_match,
2249 if_metageneration_match=if_source_metageneration_match,
2250 if_metageneration_not_match=if_source_metageneration_not_match,
2251 retry=retry,
2252 )
2253 return new_blob
2254
2255 def move_blob(
2256 self,
2257 blob,
2258 new_name,
2259 client=None,
2260 if_generation_match=None,
2261 if_generation_not_match=None,
2262 if_metageneration_match=None,
2263 if_metageneration_not_match=None,
2264 if_source_generation_match=None,
2265 if_source_generation_not_match=None,
2266 if_source_metageneration_match=None,
2267 if_source_metageneration_not_match=None,
2268 timeout=_DEFAULT_TIMEOUT,
2269 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
2270 ):
2271 """Move a blob to a new name atomically.
2272
2273 If :attr:`user_project` is set on the bucket, bills the API request to that project.
2274
2275 :type blob: :class:`google.cloud.storage.blob.Blob`
        :param blob: The blob to be moved.
2277
2278 :type new_name: str
2279 :param new_name: The new name for this blob.
2280
2281 :type client: :class:`~google.cloud.storage.client.Client` or
2282 ``NoneType``
2283 :param client: (Optional) The client to use. If not passed, falls back
2284 to the ``client`` stored on the current bucket.
2285
2286 :type if_generation_match: int
2287 :param if_generation_match:
2288 (Optional) See :ref:`using-if-generation-match`
2289 Note that the generation to be matched is that of the
2290 ``destination`` blob.
2291
2292 :type if_generation_not_match: int
2293 :param if_generation_not_match:
2294 (Optional) See :ref:`using-if-generation-not-match`
2295 Note that the generation to be matched is that of the
2296 ``destination`` blob.
2297
2298 :type if_metageneration_match: int
2299 :param if_metageneration_match:
2300 (Optional) See :ref:`using-if-metageneration-match`
2301 Note that the metageneration to be matched is that of the
2302 ``destination`` blob.
2303
2304 :type if_metageneration_not_match: int
2305 :param if_metageneration_not_match:
2306 (Optional) See :ref:`using-if-metageneration-not-match`
2307 Note that the metageneration to be matched is that of the
2308 ``destination`` blob.
2309
2310 :type if_source_generation_match: int
2311 :param if_source_generation_match:
2312 (Optional) Makes the operation conditional on whether the source
2313 object's generation matches the given value.
2314
2315 :type if_source_generation_not_match: int
2316 :param if_source_generation_not_match:
2317 (Optional) Makes the operation conditional on whether the source
2318 object's generation does not match the given value.
2319
2320 :type if_source_metageneration_match: int
2321 :param if_source_metageneration_match:
2322 (Optional) Makes the operation conditional on whether the source
2323 object's current metageneration matches the given value.
2324
2325 :type if_source_metageneration_not_match: int
2326 :param if_source_metageneration_not_match:
2327 (Optional) Makes the operation conditional on whether the source
2328 object's current metageneration does not match the given value.
2329
2330 :type timeout: float or tuple
2331 :param timeout:
2332 (Optional) The amount of time, in seconds, to wait
2333 for the server response. See: :ref:`configuring_timeouts`
2334
2335 :type retry: google.api_core.retry.Retry
2336 :param retry:
2337 (Optional) How to retry the RPC.
2338 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout).
2339
2340 :rtype: :class:`Blob`
2341 :returns: The newly-moved blob.
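
        Example (a minimal sketch; names are hypothetical; unlike
        :meth:`rename_blob`, this is a single atomic operation):

        >>> from google.cloud import storage
        >>> client = storage.Client()
        >>> bucket = client.get_bucket("my-bucket")
        >>> blob = bucket.blob("old-name.txt")
        >>> new_blob = bucket.move_blob(blob, "new-name.txt")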
2342 """
2343 with create_trace_span(name="Storage.Bucket.moveBlob"):
2344 client = self._require_client(client)
2345 query_params = {}
2346
2347 if self.user_project is not None:
2348 query_params["userProject"] = self.user_project
2349
2350 _add_generation_match_parameters(
2351 query_params,
2352 if_generation_match=if_generation_match,
2353 if_generation_not_match=if_generation_not_match,
2354 if_metageneration_match=if_metageneration_match,
2355 if_metageneration_not_match=if_metageneration_not_match,
2356 if_source_generation_match=if_source_generation_match,
2357 if_source_generation_not_match=if_source_generation_not_match,
2358 if_source_metageneration_match=if_source_metageneration_match,
2359 if_source_metageneration_not_match=if_source_metageneration_not_match,
2360 )
2361
2362 new_blob = Blob(bucket=self, name=new_name)
2363 api_path = blob.path + "/moveTo/o/" + new_blob.name
2364 move_result = client._post_resource(
2365 api_path,
2366 None,
2367 query_params=query_params,
2368 timeout=timeout,
2369 retry=retry,
2370 _target_object=new_blob,
2371 )
2372
2373 new_blob._set_properties(move_result)
2374 return new_blob
2375
2376 def restore_blob(
2377 self,
2378 blob_name,
2379 client=None,
2380 generation=None,
2381 copy_source_acl=None,
2382 projection=None,
2383 if_generation_match=None,
2384 if_generation_not_match=None,
2385 if_metageneration_match=None,
2386 if_metageneration_not_match=None,
2387 timeout=_DEFAULT_TIMEOUT,
2388 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
2389 ):
2390 """Restores a soft-deleted object.
2391
2392 If :attr:`user_project` is set on the bucket, bills the API request to that project.
2393
2394 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/objects/restore)
2395
2396 :type blob_name: str
2397 :param blob_name: The name of the blob to be restored.
2398
2399 :type client: :class:`~google.cloud.storage.client.Client`
2400 :param client: (Optional) The client to use. If not passed, falls back
2401 to the ``client`` stored on the current bucket.
2402
2403 :type generation: int
2404 :param generation: Selects the specific revision of the object.
2405
2406 :type copy_source_acl: bool
2407 :param copy_source_acl: (Optional) If true, copy the soft-deleted object's access controls.
2408
2409 :type projection: str
2410 :param projection: (Optional) Specifies the set of properties to return.
2411 If used, must be 'full' or 'noAcl'.
2412
2413 :type if_generation_match: long
2414 :param if_generation_match:
2415 (Optional) See :ref:`using-if-generation-match`
2416
2417 :type if_generation_not_match: long
2418 :param if_generation_not_match:
2419 (Optional) See :ref:`using-if-generation-not-match`
2420
2421 :type if_metageneration_match: long
2422 :param if_metageneration_match:
2423 (Optional) See :ref:`using-if-metageneration-match`
2424
2425 :type if_metageneration_not_match: long
2426 :param if_metageneration_not_match:
2427 (Optional) See :ref:`using-if-metageneration-not-match`
2428
2429 :type timeout: float or tuple
2430 :param timeout:
2431 (Optional) The amount of time, in seconds, to wait
2432 for the server response. See: :ref:`configuring_timeouts`
2433
2434 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
2435 :param retry:
2436 (Optional) How to retry the RPC.
            The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, under which
            only restore operations with ``if_generation_match`` or ``generation`` set
            will be retried.
2440
2441 Users can configure non-default retry behavior. A ``None`` value will
2442 disable retries. A ``DEFAULT_RETRY`` value will enable retries
2443 even if restore operations are not guaranteed to be idempotent.
2444 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout).
2445
2446 :rtype: :class:`google.cloud.storage.blob.Blob`
2447 :returns: The restored Blob.
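
        Example (a minimal sketch; the name and generation are hypothetical,
        the generation identifying the soft-deleted revision to restore):

        >>> from google.cloud import storage
        >>> client = storage.Client()
        >>> bucket = client.get_bucket("my-bucket")
        >>> blob = bucket.restore_blob("deleted.txt", generation=123456)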
2448 """
2449 with create_trace_span(name="Storage.Bucket.restore_blob"):
2450 client = self._require_client(client)
2451 query_params = {}
2452
2453 if self.user_project is not None:
2454 query_params["userProject"] = self.user_project
2455 if generation is not None:
2456 query_params["generation"] = generation
2457 if copy_source_acl is not None:
2458 query_params["copySourceAcl"] = copy_source_acl
2459 if projection is not None:
2460 query_params["projection"] = projection
2461
2462 _add_generation_match_parameters(
2463 query_params,
2464 if_generation_match=if_generation_match,
2465 if_generation_not_match=if_generation_not_match,
2466 if_metageneration_match=if_metageneration_match,
2467 if_metageneration_not_match=if_metageneration_not_match,
2468 )
2469
2470 blob = Blob(bucket=self, name=blob_name)
2471 api_response = client._post_resource(
2472 f"{blob.path}/restore",
2473 None,
2474 query_params=query_params,
2475 timeout=timeout,
2476 retry=retry,
2477 )
2478 blob._set_properties(api_response)
2479 return blob
2480
2481 @property
2482 def cors(self):
2483 """Retrieve or set CORS policies configured for this bucket.
2484
2485 See http://www.w3.org/TR/cors/ and
2486 https://cloud.google.com/storage/docs/json_api/v1/buckets
2487
2488 .. note::
2489
            The getter for this property returns a list which contains
            *copies* of the bucket's CORS policy mappings. Mutating the list
            or one of its dicts has no effect unless you then re-assign the
            list via the setter. E.g.:
2494
2495 >>> policies = bucket.cors
2496 >>> policies.append({'origin': '/foo', ...})
2497 >>> policies[1]['maxAgeSeconds'] = 3600
2498 >>> del policies[0]
2499 >>> bucket.cors = policies
2500 >>> bucket.update()
2501
2502 :setter: Set CORS policies for this bucket.
2503 :getter: Gets the CORS policies for this bucket.
2504
2505 :rtype: list of dictionaries
2506 :returns: A sequence of mappings describing each CORS policy.
2507 """
2508 return [copy.deepcopy(policy) for policy in self._properties.get("cors", ())]
2509
2510 @cors.setter
2511 def cors(self, entries):
2512 """Set CORS policies configured for this bucket.
2513
2514 See http://www.w3.org/TR/cors/ and
2515 https://cloud.google.com/storage/docs/json_api/v1/buckets
2516
2517 :type entries: list of dictionaries
2518 :param entries: A sequence of mappings describing each CORS policy.
2519 """
2520 self._patch_property("cors", entries)
2521
2522 default_event_based_hold = _scalar_property("defaultEventBasedHold")
2523 """Are uploaded objects automatically placed under an even-based hold?
2524
2525 If True, uploaded objects will be placed under an event-based hold to
2526 be released at a future time. When released an object will then begin
2527 the retention period determined by the policy retention period for the
2528 object bucket.
2529
2530 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2531
2532 If the property is not set locally, returns ``None``.
2533
2534 :rtype: bool or ``NoneType``
2535 """
2536
2537 @property
2538 def default_kms_key_name(self):
2539 """Retrieve / set default KMS encryption key for objects in the bucket.
2540
2541 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2542
2543 :setter: Set default KMS encryption key for items in this bucket.
2544 :getter: Get default KMS encryption key for items in this bucket.
2545
2546 :rtype: str
2547 :returns: Default KMS encryption key, or ``None`` if not set.
2548 """
2549 encryption_config = self._properties.get("encryption", {})
2550 return encryption_config.get("defaultKmsKeyName")
2551
2552 @default_kms_key_name.setter
2553 def default_kms_key_name(self, value):
2554 """Set default KMS encryption key for objects in the bucket.
2555
2556 :type value: str or None
2557 :param value: new KMS key name (None to clear any existing key).
2558 """
2559 encryption_config = self._properties.get("encryption", {})
2560 encryption_config["defaultKmsKeyName"] = value
2561 self._patch_property("encryption", encryption_config)
2562
2563 @property
2564 def labels(self):
2565 """Retrieve or set labels assigned to this bucket.
2566
2567 See
2568 https://cloud.google.com/storage/docs/json_api/v1/buckets#labels
2569
2570 .. note::
2571
2572 The getter for this property returns a dict which is a *copy*
2573 of the bucket's labels. Mutating that dict has no effect unless
2574 you then re-assign the dict via the setter. E.g.:
2575
2576 >>> labels = bucket.labels
2577 >>> labels['new_key'] = 'some-label'
2578 >>> del labels['old_key']
2579 >>> bucket.labels = labels
2580 >>> bucket.update()
2581
2582 :setter: Set labels for this bucket.
2583 :getter: Gets the labels for this bucket.
2584
2585 :rtype: :class:`dict`
2586 :returns: Name-value pairs (string->string) labelling the bucket.
2587 """
2588 labels = self._properties.get("labels")
2589 if labels is None:
2590 return {}
2591 return copy.deepcopy(labels)
2592
2593 @labels.setter
2594 def labels(self, mapping):
2595 """Set labels assigned to this bucket.
2596
2597 See
2598 https://cloud.google.com/storage/docs/json_api/v1/buckets#labels
2599
2600 :type mapping: :class:`dict`
2601 :param mapping: Name-value pairs (string->string) labelling the bucket.
2602 """
2603 # If any labels have been expressly removed, we need to track this
2604 # so that a future .patch() call can do the correct thing.
        existing = set(self.labels.keys())
        incoming = set(mapping.keys())
2607 self._label_removals = self._label_removals.union(existing.difference(incoming))
2608 mapping = {k: str(v) for k, v in mapping.items()}
2609
2610 # Actually update the labels on the object.
2611 self._patch_property("labels", copy.deepcopy(mapping))
2612
2613 @property
2614 def etag(self):
2615 """Retrieve the ETag for the bucket.
2616
2617 See https://tools.ietf.org/html/rfc2616#section-3.11 and
2618 https://cloud.google.com/storage/docs/json_api/v1/buckets
2619
2620 :rtype: str or ``NoneType``
2621 :returns: The bucket etag or ``None`` if the bucket's
2622 resource has not been loaded from the server.
2623 """
2624 return self._properties.get("etag")
2625
2626 @property
2627 def id(self):
2628 """Retrieve the ID for the bucket.
2629
2630 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2631
2632 :rtype: str or ``NoneType``
2633 :returns: The ID of the bucket or ``None`` if the bucket's
2634 resource has not been loaded from the server.
2635 """
2636 return self._properties.get("id")
2637
2638 @property
2639 def iam_configuration(self):
2640 """Retrieve IAM configuration for this bucket.
2641
2642 :rtype: :class:`IAMConfiguration`
2643 :returns: an instance for managing the bucket's IAM configuration.
2644 """
2645 info = self._properties.get("iamConfiguration", {})
2646 return IAMConfiguration.from_api_repr(info, self)
2647
2648 @property
2649 def soft_delete_policy(self):
2650 """Retrieve the soft delete policy for this bucket.
2651
2652 See https://cloud.google.com/storage/docs/soft-delete
2653
2654 :rtype: :class:`SoftDeletePolicy`
2655 :returns: an instance for managing the bucket's soft delete policy.
2656 """
2657 policy = self._properties.get("softDeletePolicy", {})
2658 return SoftDeletePolicy.from_api_repr(policy, self)
2659
2660 @property
2661 def lifecycle_rules(self):
2662 """Retrieve or set lifecycle rules configured for this bucket.
2663
2664 See https://cloud.google.com/storage/docs/lifecycle and
2665 https://cloud.google.com/storage/docs/json_api/v1/buckets
2666
2667 .. note::
2668
2669 The getter for this property returns a generator which yields
2670 *copies* of the bucket's lifecycle rules mappings. Mutating the
            output dicts has no effect unless you then re-assign the full
            list via the setter. E.g.:
2673
2674 >>> rules = list(bucket.lifecycle_rules)
            >>> rules.append({'action': {'type': 'Delete'}, 'condition': {'age': 365}})
            >>> rules[1]['condition']['age'] = 180
2677 >>> del rules[0]
2678 >>> bucket.lifecycle_rules = rules
2679 >>> bucket.update()
2680
2681 :setter: Set lifecycle rules for this bucket.
2682 :getter: Gets the lifecycle rules for this bucket.
2683
2684 :rtype: generator(dict)
2685 :returns: A sequence of mappings describing each lifecycle rule.
2686 """
2687 info = self._properties.get("lifecycle", {})
2688 for rule in info.get("rule", ()):
2689 action_type = rule["action"]["type"]
2690 if action_type == "Delete":
2691 yield LifecycleRuleDelete.from_api_repr(rule)
2692 elif action_type == "SetStorageClass":
2693 yield LifecycleRuleSetStorageClass.from_api_repr(rule)
2694 elif action_type == "AbortIncompleteMultipartUpload":
2695 yield LifecycleRuleAbortIncompleteMultipartUpload.from_api_repr(rule)
2696 else:
2697 warnings.warn(
2698 "Unknown lifecycle rule type received: {}. Please upgrade to the latest version of google-cloud-storage.".format(
2699 rule
2700 ),
2701 UserWarning,
2702 stacklevel=1,
2703 )
2704
2705 @lifecycle_rules.setter
2706 def lifecycle_rules(self, rules):
2707 """Set lifecycle rules configured for this bucket.
2708
2709 See https://cloud.google.com/storage/docs/lifecycle and
2710 https://cloud.google.com/storage/docs/json_api/v1/buckets
2711
2712 :type rules: list of dictionaries
2713 :param rules: A sequence of mappings describing each lifecycle rule.
2714 """
2715 rules = [dict(rule) for rule in rules] # Convert helpers if needed
2716 self._patch_property("lifecycle", {"rule": rules})
2717
2718 def clear_lifecycle_rules(self):
2719 """Clear lifecycle rules configured for this bucket.
2720
2721 See https://cloud.google.com/storage/docs/lifecycle and
2722 https://cloud.google.com/storage/docs/json_api/v1/buckets
2723 """
2724 self.lifecycle_rules = []
2725
2726 def clear_lifecyle_rules(self):
2727 """Deprecated alias for clear_lifecycle_rules."""
2728 return self.clear_lifecycle_rules()
2729
2730 def add_lifecycle_delete_rule(self, **kw):
2731 """Add a "delete" rule to lifecycle rules configured for this bucket.
2732
2733 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle),
2734 which is set on the bucket. For the general format of a lifecycle configuration, see the
2735 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets).
2736 See also a [code sample](https://cloud.google.com/storage/docs/samples/storage-enable-bucket-lifecycle-management#storage_enable_bucket_lifecycle_management-python).
2737
2738 :type kw: dict
        :param kw: arguments passed to :class:`LifecycleRuleConditions`.
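
        Example (a minimal sketch deleting objects older than 365 days;
        the bucket name is hypothetical):

        >>> from google.cloud import storage
        >>> client = storage.Client()
        >>> bucket = client.get_bucket("my-bucket")
        >>> bucket.add_lifecycle_delete_rule(age=365)
        >>> bucket.patch()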
2740 """
2741 rules = list(self.lifecycle_rules)
2742 rules.append(LifecycleRuleDelete(**kw))
2743 self.lifecycle_rules = rules
2744
2745 def add_lifecycle_set_storage_class_rule(self, storage_class, **kw):
2746 """Add a "set storage class" rule to lifecycle rules.
2747
2748 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle),
2749 which is set on the bucket. For the general format of a lifecycle configuration, see the
2750 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets).
2751
2752 :type storage_class: str, one of :attr:`STORAGE_CLASSES`.
2753 :param storage_class: new storage class to assign to matching items.
2754
2755 :type kw: dict
        :param kw: arguments passed to :class:`LifecycleRuleConditions`.
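
        Example (a minimal sketch moving objects to Coldline after 90 days;
        the bucket name is hypothetical):

        >>> from google.cloud import storage
        >>> client = storage.Client()
        >>> bucket = client.get_bucket("my-bucket")
        >>> bucket.add_lifecycle_set_storage_class_rule("COLDLINE", age=90)
        >>> bucket.patch()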
2757 """
2758 rules = list(self.lifecycle_rules)
2759 rules.append(LifecycleRuleSetStorageClass(storage_class, **kw))
2760 self.lifecycle_rules = rules
2761
2762 def add_lifecycle_abort_incomplete_multipart_upload_rule(self, **kw):
2763 """Add a "abort incomplete multipart upload" rule to lifecycle rules.
2764
2765 .. note::
2766 The "age" lifecycle condition is the only supported condition
2767 for this rule.
2768
2769 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle),
2770 which is set on the bucket. For the general format of a lifecycle configuration, see the
2771 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets).
2772
2773 :type kw: dict
        :param kw: arguments passed to :class:`LifecycleRuleConditions`.
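
        Example (a minimal sketch aborting uploads left incomplete for
        7 days; the bucket name is hypothetical):

        >>> from google.cloud import storage
        >>> client = storage.Client()
        >>> bucket = client.get_bucket("my-bucket")
        >>> bucket.add_lifecycle_abort_incomplete_multipart_upload_rule(age=7)
        >>> bucket.patch()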
2775 """
2776 rules = list(self.lifecycle_rules)
2777 rules.append(LifecycleRuleAbortIncompleteMultipartUpload(**kw))
2778 self.lifecycle_rules = rules
2779
2780 _location = _scalar_property("location")
2781
2782 @property
2783 def location(self):
2784 """Retrieve location configured for this bucket.
2785
2786 See https://cloud.google.com/storage/docs/json_api/v1/buckets and
2787 https://cloud.google.com/storage/docs/locations
2788
2789 Returns ``None`` if the property has not been set before creation,
        or if the bucket's resource has not been loaded from the server.

        :rtype: str or ``NoneType``
2792 """
2793 return self._location
2794
2795 @location.setter
2796 def location(self, value):
2797 """(Deprecated) Set `Bucket.location`
2798
2799 This can only be set at bucket **creation** time.
2800
2801 See https://cloud.google.com/storage/docs/json_api/v1/buckets and
2802 https://cloud.google.com/storage/docs/bucket-locations
2803
2804 .. warning::
2805
2806 Assignment to 'Bucket.location' is deprecated, as it is only
2807 valid before the bucket is created. Instead, pass the location
2808 to `Bucket.create`.
2809 """
2810 warnings.warn(_LOCATION_SETTER_MESSAGE, DeprecationWarning, stacklevel=2)
2811 self._location = value
2812
2813 @property
2814 def data_locations(self):
2815 """Retrieve the list of regional locations for custom dual-region buckets.
2816
2817 See https://cloud.google.com/storage/docs/json_api/v1/buckets and
2818 https://cloud.google.com/storage/docs/locations
2819
2820 Returns ``None`` if the property has not been set before creation,
2821 if the bucket's resource has not been loaded from the server,
        or if the bucket is not a dual-region bucket.

        :rtype: list of str or ``NoneType``
2824 """
2825 custom_placement_config = self._properties.get("customPlacementConfig", {})
2826 return custom_placement_config.get("dataLocations")
2827
2828 @property
2829 def location_type(self):
2830 """Retrieve the location type for the bucket.
2831
2832 See https://cloud.google.com/storage/docs/storage-classes
2833
        :getter: Gets the location type for this bucket.
2835
2836 :rtype: str or ``NoneType``
2837 :returns:
2838 If set, one of
2839 :attr:`~google.cloud.storage.constants.MULTI_REGION_LOCATION_TYPE`,
2840 :attr:`~google.cloud.storage.constants.REGION_LOCATION_TYPE`, or
2841 :attr:`~google.cloud.storage.constants.DUAL_REGION_LOCATION_TYPE`,
2842 else ``None``.
2843 """
2844 return self._properties.get("locationType")
2845
2846 def get_logging(self):
2847 """Return info about access logging for this bucket.
2848
2849 See https://cloud.google.com/storage/docs/access-logs#status
2850
2851 :rtype: dict or None
        :returns: a dict with keys ``logBucket`` and ``logObjectPrefix``
                  (if logging is enabled), or None (if not).
2854 """
2855 info = self._properties.get("logging")
2856 return copy.deepcopy(info)
2857
2858 def enable_logging(self, bucket_name, object_prefix=""):
2859 """Enable access logging for this bucket.
2860
2861 See https://cloud.google.com/storage/docs/access-logs
2862
2863 :type bucket_name: str
2864 :param bucket_name: name of bucket in which to store access logs
2865
2866 :type object_prefix: str
2867 :param object_prefix: prefix for access log filenames
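
        Example (a minimal sketch; both names are hypothetical, and the
        change must be persisted, e.g. with ``bucket.patch()``):

        >>> from google.cloud import storage
        >>> client = storage.Client()
        >>> bucket = client.get_bucket("my-bucket")
        >>> bucket.enable_logging("log-bucket", object_prefix="access-logs")
        >>> bucket.patch()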
2868 """
2869 info = {"logBucket": bucket_name, "logObjectPrefix": object_prefix}
2870 self._patch_property("logging", info)
2871
2872 def disable_logging(self):
2873 """Disable access logging for this bucket.
2874
2875 See https://cloud.google.com/storage/docs/access-logs#disabling
2876 """
2877 self._patch_property("logging", None)
2878
2879 @property
2880 def metageneration(self):
2881 """Retrieve the metageneration for the bucket.
2882
2883 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2884
2885 :rtype: int or ``NoneType``
2886 :returns: The metageneration of the bucket or ``None`` if the bucket's
2887 resource has not been loaded from the server.
2888 """
2889 metageneration = self._properties.get("metageneration")
2890 if metageneration is not None:
2891 return int(metageneration)
2892
2893 @property
2894 def owner(self):
2895 """Retrieve info about the owner of the bucket.
2896
2897 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2898
2899 :rtype: dict or ``NoneType``
2900 :returns: Mapping of owner's role/ID. Returns ``None`` if the bucket's
2901 resource has not been loaded from the server.
2902 """
2903 return copy.deepcopy(self._properties.get("owner"))
2904
2905 @property
2906 def project_number(self):
2907 """Retrieve the number of the project to which the bucket is assigned.
2908
2909 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2910
2911 :rtype: int or ``NoneType``
2912 :returns: The project number that owns the bucket or ``None`` if
2913 the bucket's resource has not been loaded from the server.
2914 """
2915 project_number = self._properties.get("projectNumber")
2916 if project_number is not None:
2917 return int(project_number)
2918
2919 @property
2920 def retention_policy_effective_time(self):
2921 """Retrieve the effective time of the bucket's retention policy.
2922
2923 :rtype: datetime.datetime or ``NoneType``
        :returns: point-in-time at which the bucket's retention policy is
2925 effective, or ``None`` if the property is not
2926 set locally.
2927 """
2928 policy = self._properties.get("retentionPolicy")
2929 if policy is not None:
2930 timestamp = policy.get("effectiveTime")
2931 if timestamp is not None:
2932 return _rfc3339_nanos_to_datetime(timestamp)
2933
2934 @property
2935 def retention_policy_locked(self):
2936 """Retrieve whthere the bucket's retention policy is locked.
2937
2938 :rtype: bool
2939 :returns: True if the bucket's policy is locked, or else False
2940 if the policy is not locked, or the property is not
2941 set locally.
2942 """
2943 policy = self._properties.get("retentionPolicy")
2944 if policy is not None:
2945 return policy.get("isLocked")
2946
2947 @property
2948 def retention_period(self):
2949 """Retrieve or set the retention period for items in the bucket.
2950
2951 :rtype: int or ``NoneType``
2952 :returns: number of seconds to retain items after upload or release
2953 from event-based lock, or ``None`` if the property is not
2954 set locally.
2955 """
2956 policy = self._properties.get("retentionPolicy")
2957 if policy is not None:
2958 period = policy.get("retentionPeriod")
2959 if period is not None:
2960 return int(period)
2961
2962 @retention_period.setter
2963 def retention_period(self, value):
2964 """Set the retention period for items in the bucket.
2965
        :type value: int or ``NoneType``
        :param value:
            number of seconds to retain items after upload or release from
            event-based lock, or ``None`` to clear the retention policy.
2970
2971 :raises ValueError: if the bucket's retention policy is locked.
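
        Example (a minimal sketch, assuming an existing ``bucket`` whose
        policy is not locked; the change is persisted with ``patch()``):

        >>> bucket.retention_period = 86400  # retain for one day, in seconds
        >>> bucket.patch()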
2972 """
2973 policy = self._properties.setdefault("retentionPolicy", {})
2974 if value is not None:
2975 policy["retentionPeriod"] = str(value)
2976 else:
2977 policy = None
2978 self._patch_property("retentionPolicy", policy)
2979
2980 @property
2981 def self_link(self):
2982 """Retrieve the URI for the bucket.
2983
2984 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2985
2986 :rtype: str or ``NoneType``
2987 :returns: The self link for the bucket or ``None`` if
2988 the bucket's resource has not been loaded from the server.
2989 """
2990 return self._properties.get("selfLink")
2991
2992 @property
2993 def storage_class(self):
2994 """Retrieve or set the storage class for the bucket.
2995
2996 See https://cloud.google.com/storage/docs/storage-classes
2997
2998 :setter: Set the storage class for this bucket.
        :getter: Gets the storage class for this bucket.
3000
3001 :rtype: str or ``NoneType``
3002 :returns:
3003 If set, one of
3004 :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`,
3005 :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`,
3006 :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`,
3007 :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`,
3008 :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`,
3009 :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`,
3010 or
3011 :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`,
3012 else ``None``.
3013 """
3014 return self._properties.get("storageClass")
3015
3016 @storage_class.setter
3017 def storage_class(self, value):
3018 """Set the storage class for the bucket.
3019
3020 See https://cloud.google.com/storage/docs/storage-classes
3021
3022 :type value: str
3023 :param value:
3024 One of
3025 :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`,
3026 :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`,
3027 :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`,
3028 :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`,
3029 :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`,
3030 :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`,
3031 or
            :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`.
3033 """
3034 self._patch_property("storageClass", value)
3035
3036 @property
3037 def time_created(self):
3038 """Retrieve the timestamp at which the bucket was created.
3039
3040 See https://cloud.google.com/storage/docs/json_api/v1/buckets
3041
3042 :rtype: :class:`datetime.datetime` or ``NoneType``
3043 :returns: Datetime object parsed from RFC3339 valid timestamp, or
3044 ``None`` if the bucket's resource has not been loaded
3045 from the server.
3046 """
3047 value = self._properties.get("timeCreated")
3048 if value is not None:
3049 return _rfc3339_nanos_to_datetime(value)
3050
3051 @property
3052 def updated(self):
3053 """Retrieve the timestamp at which the bucket was last updated.
3054
3055 See https://cloud.google.com/storage/docs/json_api/v1/buckets
3056
3057 :rtype: :class:`datetime.datetime` or ``NoneType``
3058 :returns: Datetime object parsed from RFC3339 valid timestamp, or
3059 ``None`` if the bucket's resource has not been loaded
3060 from the server.
3061 """
3062 value = self._properties.get("updated")
3063 if value is not None:
3064 return _rfc3339_nanos_to_datetime(value)
3065
3066 @property
3067 def versioning_enabled(self):
3068 """Is versioning enabled for this bucket?
3069
3070 See https://cloud.google.com/storage/docs/object-versioning for
3071 details.
3072
3073 :setter: Update whether versioning is enabled for this bucket.
3074 :getter: Query whether versioning is enabled for this bucket.
3075
3076 :rtype: bool
3077 :returns: True if enabled, else False.
3078 """
3079 versioning = self._properties.get("versioning", {})
3080 return versioning.get("enabled", False)
3081
3082 @versioning_enabled.setter
3083 def versioning_enabled(self, value):
3084 """Enable versioning for this bucket.
3085
3086 See https://cloud.google.com/storage/docs/object-versioning for
3087 details.
3088
3089 :type value: convertible to boolean
3090 :param value: should versioning be enabled for the bucket?
3091 """
3092 self._patch_property("versioning", {"enabled": bool(value)})
3093
3094 @property
3095 def requester_pays(self):
3096 """Does the requester pay for API requests for this bucket?
3097
3098 See https://cloud.google.com/storage/docs/requester-pays for
3099 details.
3100
3101 :setter: Update whether requester pays for this bucket.
3102 :getter: Query whether requester pays for this bucket.
3103
3104 :rtype: bool
3105 :returns: True if requester pays for API requests for the bucket,
3106 else False.
3107 """
        billing = self._properties.get("billing", {})
        return billing.get("requesterPays", False)
3110
3111 @requester_pays.setter
3112 def requester_pays(self, value):
3113 """Update whether requester pays for API requests for this bucket.
3114
3115 See https://cloud.google.com/storage/docs/using-requester-pays for
3116 details.
3117
3118 :type value: convertible to boolean
3119 :param value: should requester pay for API requests for the bucket?
3120 """
3121 self._patch_property("billing", {"requesterPays": bool(value)})
3122
3123 @property
3124 def autoclass_enabled(self):
3125 """Whether Autoclass is enabled for this bucket.
3126
3127 See https://cloud.google.com/storage/docs/using-autoclass for details.
3128
        :setter: Update whether autoclass is enabled for this bucket.
        :getter: Query whether autoclass is enabled for this bucket.

        :rtype: bool
        :returns: True if enabled, else False.
        """
        autoclass = self._properties.get("autoclass", {})
        return autoclass.get("enabled", False)

    @autoclass_enabled.setter
    def autoclass_enabled(self, value):
        """Enable or disable Autoclass at the bucket level.

        See https://cloud.google.com/storage/docs/using-autoclass for details.

        :type value: convertible to boolean
        :param value: If true, enable Autoclass for this bucket.
                      If false, disable Autoclass for this bucket.
        """
        autoclass = self._properties.get("autoclass", {})
        autoclass["enabled"] = bool(value)
        self._patch_property("autoclass", autoclass)

    @property
    def autoclass_toggle_time(self):
        """Retrieve the time at which Autoclass was last enabled or disabled for the bucket.

        :rtype: datetime.datetime or ``NoneType``
        :returns: point-in-time at which the bucket's Autoclass setting was
                  last toggled, or ``None`` if the property is not set locally.
        """
        autoclass = self._properties.get("autoclass")
        if autoclass is not None:
            timestamp = autoclass.get("toggleTime")
            if timestamp is not None:
                return _rfc3339_nanos_to_datetime(timestamp)

    @property
    def autoclass_terminal_storage_class(self):
        """The storage class that objects in an Autoclass bucket eventually transition to if
        they are not read for a certain length of time. Valid values are NEARLINE and ARCHIVE.

        See https://cloud.google.com/storage/docs/using-autoclass for details.

        :setter: Set the terminal storage class for Autoclass configuration.
        :getter: Get the terminal storage class for Autoclass configuration.

        :rtype: str
        :returns: The terminal storage class if Autoclass is enabled, else ``None``.
        """
        autoclass = self._properties.get("autoclass", {})
        return autoclass.get("terminalStorageClass", None)

    @autoclass_terminal_storage_class.setter
    def autoclass_terminal_storage_class(self, value):
        """Set the storage class that objects in an Autoclass bucket eventually transition to if
        they are not read for a certain length of time. Valid values are NEARLINE and ARCHIVE.

        See https://cloud.google.com/storage/docs/using-autoclass for details.

        :type value: str
        :param value: The only valid values are `"NEARLINE"` and `"ARCHIVE"`.
        """
        autoclass = self._properties.get("autoclass", {})
        autoclass["terminalStorageClass"] = value
        self._patch_property("autoclass", autoclass)

    @property
    def autoclass_terminal_storage_class_update_time(self):
        """The time at which the Autoclass terminal_storage_class field was last updated for this bucket.

        :rtype: datetime.datetime or ``NoneType``
        :returns: point-in-time at which the bucket's terminal_storage_class
                  was last updated, or ``None`` if the property is not set
                  locally.
        """
        autoclass = self._properties.get("autoclass")
        if autoclass is not None:
            timestamp = autoclass.get("terminalStorageClassUpdateTime")
            if timestamp is not None:
                return _rfc3339_nanos_to_datetime(timestamp)

    @property
    def object_retention_mode(self):
        """Retrieve the object retention mode set on the bucket.

        :rtype: str
        :returns: When set to ``Enabled``, retention configurations can be
                  set on objects in the bucket.
        """
        object_retention = self._properties.get("objectRetention")
        if object_retention is not None:
            return object_retention.get("mode")

    @property
    def hierarchical_namespace_enabled(self):
        """Whether hierarchical namespace is enabled for this bucket.

        :setter: Update whether hierarchical namespace is enabled for this bucket.
        :getter: Query whether hierarchical namespace is enabled for this bucket.

        :rtype: bool or ``NoneType``
        :returns: True if enabled, False if explicitly disabled, or ``None``
                  if the property is not set locally.
        """
        hns = self._properties.get("hierarchicalNamespace", {})
        return hns.get("enabled")

    @hierarchical_namespace_enabled.setter
    def hierarchical_namespace_enabled(self, value):
        """Enable or disable hierarchical namespace at the bucket level.

        :type value: convertible to boolean
        :param value: If true, enable hierarchical namespace for this bucket.
                      If false, disable hierarchical namespace for this bucket.

        .. note::
            To enable hierarchical namespace, you must set it at bucket creation time.
            Currently, hierarchical namespace configuration cannot be changed after bucket creation.
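
        Example (a minimal sketch; the bucket name is hypothetical, and the
        pairing with uniform bucket-level access reflects the service's
        documented requirement for hierarchical namespace buckets):

        .. code-block:: python

            from google.cloud import storage

            client = storage.Client()
            bucket = client.bucket("my-new-hns-bucket")
            bucket.hierarchical_namespace_enabled = True
            bucket.iam_configuration.uniform_bucket_level_access_enabled = True
            client.create_bucket(bucket)  # HNS can only be set at creation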
        """
        hns = self._properties.get("hierarchicalNamespace", {})
        hns["enabled"] = bool(value)
        self._patch_property("hierarchicalNamespace", hns)

    def configure_website(self, main_page_suffix=None, not_found_page=None):
        """Configure website-related properties.

        See https://cloud.google.com/storage/docs/static-website

        .. note::
            This configures the bucket's website-related properties, controlling how
            the service behaves when accessing bucket contents as a web site.
            See [tutorials](https://cloud.google.com/storage/docs/hosting-static-website) and
            [code samples](https://cloud.google.com/storage/docs/samples/storage-define-bucket-website-configuration#storage_define_bucket_website_configuration-python)
            for more information.

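        Example (a short sketch; the page names are illustrative and the
        bucket is assumed to exist):

        .. code-block:: python

            bucket.configure_website(
                main_page_suffix="index.html", not_found_page="404.html"
            )
            bucket.patch()  # the change is staged locally until patched
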
        :type main_page_suffix: str
        :param main_page_suffix: The page to use as the main page
                                 of a directory.
                                 Typically something like index.html.

        :type not_found_page: str
        :param not_found_page: The file to use when a page isn't found.
        """
        data = {
            "mainPageSuffix": main_page_suffix,
            "notFoundPage": not_found_page,
        }
        self._patch_property("website", data)

    def disable_website(self):
        """Disable the website configuration for this bucket.

        This is really just a shortcut for setting the website-related
        attributes to ``None``.
        """
        return self.configure_website(None, None)

    def get_iam_policy(
        self,
        client=None,
        requested_policy_version=None,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
    ):
        """Retrieve the IAM policy for the bucket.

        See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy)
        and a [code sample](https://cloud.google.com/storage/docs/samples/storage-view-bucket-iam-members#storage_view_bucket_iam_members-python).

        If :attr:`user_project` is set, bills the API request to that project.

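        Example (a minimal sketch; the printing is illustrative only):

        .. code-block:: python

            policy = bucket.get_iam_policy(requested_policy_version=3)
            for binding in policy.bindings:
                print(binding["role"], sorted(binding["members"]))
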
        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type requested_policy_version: int or ``NoneType``
        :param requested_policy_version: (Optional) The version of IAM policies to request.
                                         If a policy with a condition is requested without
                                         setting this, the server will return an error.
                                         This must be set to a value of 3 to retrieve IAM
                                         policies containing conditions. This is to prevent
                                         client code that isn't aware of IAM conditions from
                                         interpreting and modifying policies incorrectly.
                                         The service might return a policy with a version
                                         lower than the one that was requested, based on the
                                         feature syntax in the policy fetched.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :rtype: :class:`google.api_core.iam.Policy`
        :returns: the policy instance, based on the resource returned from
                  the ``getIamPolicy`` API request.
        """
        with create_trace_span(name="Storage.Bucket.getIamPolicy"):
            client = self._require_client(client)
            query_params = {}

            if self.user_project is not None:
                query_params["userProject"] = self.user_project

            if requested_policy_version is not None:
                query_params["optionsRequestedPolicyVersion"] = requested_policy_version

            info = client._get_resource(
                f"{self.path}/iam",
                query_params=query_params,
                timeout=timeout,
                retry=retry,
                _target_object=None,
            )
            return Policy.from_api_repr(info)

    def set_iam_policy(
        self,
        policy,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY_IF_ETAG_IN_JSON,
    ):
        """Update the IAM policy for the bucket.

        See
        https://cloud.google.com/storage/docs/json_api/v1/buckets/setIamPolicy

        If :attr:`user_project` is set, bills the API request to that project.

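        Example (a sketch of the read-modify-write pattern; the role and
        member values are illustrative):

        .. code-block:: python

            policy = bucket.get_iam_policy(requested_policy_version=3)
            policy.bindings.append(
                {
                    "role": "roles/storage.objectViewer",
                    "members": {"serviceAccount:reader@example.iam.gserviceaccount.com"},
                }
            )
            bucket.set_iam_policy(policy)
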
        :type policy: :class:`google.api_core.iam.Policy`
        :param policy: policy instance used to update bucket's IAM policy.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :rtype: :class:`google.api_core.iam.Policy`
        :returns: the policy instance, based on the resource returned from
                  the ``setIamPolicy`` API request.
        """
        with create_trace_span(name="Storage.Bucket.setIamPolicy"):
            client = self._require_client(client)
            query_params = {}

            if self.user_project is not None:
                query_params["userProject"] = self.user_project

            path = f"{self.path}/iam"
            resource = policy.to_api_repr()
            resource["resourceId"] = self.path

            info = client._put_resource(
                path,
                resource,
                query_params=query_params,
                timeout=timeout,
                retry=retry,
                _target_object=None,
            )

            return Policy.from_api_repr(info)

    def test_iam_permissions(
        self,
        permissions,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
    ):
        """API call: test permissions

        See
        https://cloud.google.com/storage/docs/json_api/v1/buckets/testIamPermissions

        If :attr:`user_project` is set, bills the API request to that project.

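        Example (a minimal sketch; the permission names are standard Cloud
        Storage IAM permissions):

        .. code-block:: python

            permissions = ["storage.buckets.get", "storage.objects.list"]
            granted = bucket.test_iam_permissions(permissions)
            # `granted` is the subset of `permissions` the caller holds.
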
        :type permissions: list of string
        :param permissions: the permissions to check

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :rtype: list of string
        :returns: the permissions returned by the ``testIamPermissions`` API
                  request.
        """
        with create_trace_span(name="Storage.Bucket.testIamPermissions"):
            client = self._require_client(client)
            query_params = {"permissions": permissions}

            if self.user_project is not None:
                query_params["userProject"] = self.user_project

            path = f"{self.path}/iam/testPermissions"
            resp = client._get_resource(
                path,
                query_params=query_params,
                timeout=timeout,
                retry=retry,
                _target_object=None,
            )
            return resp.get("permissions", [])

    def make_public(
        self,
        recursive=False,
        future=False,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
    ):
        """Update bucket's ACL, granting read access to anonymous users.

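        Example (a minimal sketch; assumes a fine-grained-ACL bucket, since
        ACL updates are rejected when uniform bucket-level access is enabled):

        .. code-block:: python

            bucket.make_public(recursive=True, future=True)
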
        :type recursive: bool
        :param recursive: If True, this will make all blobs inside the bucket
                          public as well.

        :type future: bool
        :param future: If True, this will make all objects created in the
                       future public as well.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :raises ValueError:
            If ``recursive`` is True, and the bucket contains more than 256
            blobs. This is to prevent extremely long runtime of this
            method. For such buckets, iterate over the blobs returned by
            :meth:`list_blobs` and call
            :meth:`~google.cloud.storage.blob.Blob.make_public`
            for each blob.
        """
        with create_trace_span(name="Storage.Bucket.makePublic"):
            self.acl.all().grant_read()
            self.acl.save(
                client=client,
                timeout=timeout,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                retry=retry,
            )

            if future:
                doa = self.default_object_acl
                if not doa.loaded:
                    doa.reload(client=client, timeout=timeout)
                doa.all().grant_read()
                doa.save(
                    client=client,
                    timeout=timeout,
                    if_metageneration_match=if_metageneration_match,
                    if_metageneration_not_match=if_metageneration_not_match,
                    retry=retry,
                )

            if recursive:
                blobs = list(
                    self.list_blobs(
                        projection="full",
                        max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
                        client=client,
                        timeout=timeout,
                    )
                )
                if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
                    message = (
                        "Refusing to make public recursively with more than "
                        "%d objects. If you actually want to make every object "
                        "in this bucket public, iterate through the blobs "
                        "returned by 'Bucket.list_blobs()' and call "
                        "'make_public' on each one."
                    ) % (self._MAX_OBJECTS_FOR_ITERATION,)
                    raise ValueError(message)

                for blob in blobs:
                    blob.acl.all().grant_read()
                    blob.acl.save(
                        client=client,
                        timeout=timeout,
                    )

    def make_private(
        self,
        recursive=False,
        future=False,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
    ):
        """Update bucket's ACL, revoking read access for anonymous users.

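        Example (a minimal sketch, mirroring :meth:`make_public`):

        .. code-block:: python

            bucket.make_private(recursive=True, future=True)
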
        :type recursive: bool
        :param recursive: If True, this will make all blobs inside the bucket
                          private as well.

        :type future: bool
        :param future: If True, this will make all objects created in the
                       future private as well.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :raises ValueError:
            If ``recursive`` is True, and the bucket contains more than 256
            blobs. This is to prevent extremely long runtime of this
            method. For such buckets, iterate over the blobs returned by
            :meth:`list_blobs` and call
            :meth:`~google.cloud.storage.blob.Blob.make_private`
            for each blob.
        """
        with create_trace_span(name="Storage.Bucket.makePrivate"):
            self.acl.all().revoke_read()
            self.acl.save(
                client=client,
                timeout=timeout,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                retry=retry,
            )

            if future:
                doa = self.default_object_acl
                if not doa.loaded:
                    doa.reload(client=client, timeout=timeout)
                doa.all().revoke_read()
                doa.save(
                    client=client,
                    timeout=timeout,
                    if_metageneration_match=if_metageneration_match,
                    if_metageneration_not_match=if_metageneration_not_match,
                    retry=retry,
                )

            if recursive:
                blobs = list(
                    self.list_blobs(
                        projection="full",
                        max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
                        client=client,
                        timeout=timeout,
                    )
                )
                if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
                    message = (
                        "Refusing to make private recursively with more than "
                        "%d objects. If you actually want to make every object "
                        "in this bucket private, iterate through the blobs "
                        "returned by 'Bucket.list_blobs()' and call "
                        "'make_private' on each one."
                    ) % (self._MAX_OBJECTS_FOR_ITERATION,)
                    raise ValueError(message)

                for blob in blobs:
                    blob.acl.all().revoke_read()
                    blob.acl.save(client=client, timeout=timeout)

    def generate_upload_policy(self, conditions, expiration=None, client=None):
        """Create a signed upload policy for uploading objects.

        This method generates and signs a policy document. You can use
        [`policy documents`](https://cloud.google.com/storage/docs/xml-api/post-object-forms)
        to allow visitors to a website to upload files to
        Google Cloud Storage without giving them direct write access.
        See a [code sample](https://cloud.google.com/storage/docs/xml-api/post-object-forms#python).

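        Example (a sketch; the condition list and the form handling described
        in the comments are illustrative):

        .. code-block:: python

            conditions = [["starts-with", "$key", "uploads/"]]
            fields = bucket.generate_upload_policy(conditions)
            # Render `fields` as hidden <input> elements in an HTML form that
            # POSTs to the bucket's XML API endpoint, alongside a "key" field
            # and the file contents.
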
        :type conditions: list
        :param conditions: A list of conditions as described in the
                           `policy documents` documentation.

        :type expiration: datetime
        :param expiration: (Optional) Expiration in UTC. If not specified, the
                           policy will expire in 1 hour.

        :type client: :class:`~google.cloud.storage.client.Client`
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :rtype: dict
        :returns: A dictionary of (form field name, form field value) of form
                  fields that should be added to your HTML upload form in order
                  to attach the signature.
        """
        client = self._require_client(client)
        credentials = client._credentials
        _signing.ensure_signed_credentials(credentials)

        if expiration is None:
            expiration = _NOW(_UTC).replace(tzinfo=None) + datetime.timedelta(hours=1)

        conditions = conditions + [{"bucket": self.name}]

        policy_document = {
            "expiration": _datetime_to_rfc3339(expiration),
            "conditions": conditions,
        }

        encoded_policy_document = base64.b64encode(
            json.dumps(policy_document).encode("utf-8")
        )
        signature = base64.b64encode(credentials.sign_bytes(encoded_policy_document))

        fields = {
            "bucket": self.name,
            "GoogleAccessId": credentials.signer_email,
            "policy": encoded_policy_document.decode("utf-8"),
            "signature": signature.decode("utf-8"),
        }

        return fields

    def lock_retention_policy(
        self, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY
    ):
        """Lock the bucket's retention policy.

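        Example (a minimal sketch; assumes a retention policy has already been
        set on the bucket, and the bucket name is illustrative):

        .. code-block:: python

            bucket = client.get_bucket("my-bucket")  # loads metageneration
            bucket.lock_retention_policy()  # irreversible once it succeeds
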
        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :raises ValueError:
            if the bucket has no metageneration (i.e., new or never reloaded);
            if the bucket has no retention policy assigned;
            if the bucket's retention policy is already locked.
        """
        with create_trace_span(name="Storage.Bucket.lockRetentionPolicy"):
            if "metageneration" not in self._properties:
                raise ValueError(
                    "Bucket has no metageneration value: try 'reload'?"
                )

            policy = self._properties.get("retentionPolicy")

            if policy is None:
                raise ValueError(
                    "Bucket has no retention policy assigned: try 'reload'?"
                )

            if policy.get("isLocked"):
                raise ValueError("Bucket's retention policy is already locked.")

            client = self._require_client(client)

            query_params = {"ifMetagenerationMatch": self.metageneration}

            if self.user_project is not None:
                query_params["userProject"] = self.user_project

            path = f"/b/{self.name}/lockRetentionPolicy"
            api_response = client._post_resource(
                path,
                None,
                query_params=query_params,
                timeout=timeout,
                retry=retry,
                _target_object=self,
            )
            self._set_properties(api_response)

    def generate_signed_url(
        self,
        expiration=None,
        api_access_endpoint=None,
        method="GET",
        headers=None,
        query_parameters=None,
        client=None,
        credentials=None,
        version=None,
        virtual_hosted_style=False,
        bucket_bound_hostname=None,
        scheme="http",
    ):
        """Generates a signed URL for this bucket.

        .. note::

            If you are on Google Compute Engine, you can't generate a signed
            URL using a GCE service account. If you'd like to be able to
            generate a signed URL from GCE, you can use a standard service
            account from a JSON file rather than a GCE service account.

        If you have a bucket that you want to allow access to for a set
        amount of time, you can use this method to generate a URL that
        is only valid within a certain time period.

        If ``bucket_bound_hostname`` is passed instead of the default
        ``api_access_endpoint``, ``https`` works only when the hostname is
        backed by a CDN.

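        Example (a minimal sketch; the one-hour lifetime is illustrative):

        .. code-block:: python

            import datetime

            url = bucket.generate_signed_url(
                expiration=datetime.timedelta(hours=1), version="v4"
            )
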
        :type expiration: Union[Integer, datetime.datetime, datetime.timedelta]
        :param expiration: Point in time when the signed URL should expire. If
                           a ``datetime`` instance is passed without an explicit
                           ``tzinfo`` set, it will be assumed to be ``UTC``.

        :type api_access_endpoint: str
        :param api_access_endpoint: (Optional) URI base, for instance
                                    "https://storage.googleapis.com". If not specified,
                                    the client's api_endpoint will be used. Incompatible
                                    with bucket_bound_hostname.

        :type method: str
        :param method: The HTTP verb that will be used when requesting the URL.

        :type headers: dict
        :param headers:
            (Optional) Additional HTTP headers to be included as part of the
            signed URLs. See:
            https://cloud.google.com/storage/docs/xml-api/reference-headers
            Requests using the signed URL *must* pass the specified header
            (name and value) with each request for the URL.

        :type query_parameters: dict
        :param query_parameters:
            (Optional) Additional query parameters to be included as part of the
            signed URLs. See:
            https://cloud.google.com/storage/docs/xml-api/reference-headers#query

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type credentials: :class:`google.auth.credentials.Credentials` or
                           :class:`NoneType`
        :param credentials: The authorization credentials to attach to requests.
                            These credentials identify this application to the service.
                            If none are specified, the client will attempt to ascertain
                            the credentials from the environment.

        :type version: str
        :param version: (Optional) The version of signed credential to create.
                        Must be one of 'v2' | 'v4'.

        :type virtual_hosted_style: bool
        :param virtual_hosted_style:
            (Optional) If true, then construct the URL relative to the bucket's
            virtual hostname, e.g., '<bucket-name>.storage.googleapis.com'.
            Incompatible with bucket_bound_hostname.

        :type bucket_bound_hostname: str
        :param bucket_bound_hostname:
            (Optional) If passed, then construct the URL relative to the bucket-bound hostname.
            The value can be a bare hostname or one with a scheme, e.g.,
            'example.com' or 'http://example.com'.
            Incompatible with api_access_endpoint and virtual_hosted_style.
            See: https://cloud.google.com/storage/docs/request-endpoints#cname

        :type scheme: str
        :param scheme:
            (Optional) If ``bucket_bound_hostname`` is passed as a bare hostname, use
            this value as the scheme. ``https`` will work only when using a CDN.
            Defaults to ``"http"``.

        :raises: :exc:`ValueError` when version is invalid or mutually exclusive arguments are used.
        :raises: :exc:`TypeError` when expiration is not a valid type.
        :raises: :exc:`AttributeError` if credentials is not an instance
                 of :class:`google.auth.credentials.Signing`.

        :rtype: str
        :returns: A signed URL you can use to access the resource
                  until expiration.
        """
        if version is None:
            version = "v2"
        elif version not in ("v2", "v4"):
            raise ValueError("'version' must be either 'v2' or 'v4'")

        if (
            api_access_endpoint is not None or virtual_hosted_style
        ) and bucket_bound_hostname:
            raise ValueError(
                "The bucket_bound_hostname argument is not compatible with "
                "either api_access_endpoint or virtual_hosted_style."
            )

        if api_access_endpoint is None:
            client = self._require_client(client)
            api_access_endpoint = client.api_endpoint

        # If you are on Google Compute Engine, you can't generate a signed URL
        # using a GCE service account.
        # See https://github.com/googleapis/google-auth-library-python/issues/50
        if virtual_hosted_style:
            api_access_endpoint = _virtual_hosted_style_base_url(
                api_access_endpoint, self.name
            )
            resource = "/"
        elif bucket_bound_hostname:
            api_access_endpoint = _bucket_bound_hostname_url(
                bucket_bound_hostname, scheme
            )
            resource = "/"
        else:
            resource = f"/{self.name}"

        if credentials is None:
            client = self._require_client(client)  # May be redundant, but that's ok.
            credentials = client._credentials

        if version == "v2":
            helper = generate_signed_url_v2
        else:
            helper = generate_signed_url_v4

        return helper(
            credentials,
            resource=resource,
            expiration=expiration,
            api_access_endpoint=api_access_endpoint,
            method=method.upper(),
            headers=headers,
            query_parameters=query_parameters,
        )

    @property
    def ip_filter(self):
        """Retrieve or set the IP Filter configuration for this bucket.

        See https://cloud.google.com/storage/docs/ip-filtering-overview and
        https://cloud.google.com/storage/docs/json_api/v1/buckets#ipFilter

        .. note::
            The getter for this property returns an
            :class:`~google.cloud.storage.ip_filter.IPFilter` object, which is a
            structured representation of the bucket's IP filter configuration.
            Modifying the returned object has no effect. To update the bucket's
            IP filter, create and assign a new ``IPFilter`` object to this
            property and then call
            :meth:`~google.cloud.storage.bucket.Bucket.patch`.

        .. code-block:: python

            from google.cloud.storage.ip_filter import (
                IPFilter,
                PublicNetworkSource,
            )

            ip_filter = IPFilter()
            ip_filter.mode = "Enabled"
            ip_filter.public_network_source = PublicNetworkSource(
                allowed_ip_cidr_ranges=["203.0.113.5/32"]
            )
            bucket.ip_filter = ip_filter
            bucket.patch()

        :setter: Set the IP Filter configuration for this bucket.
        :getter: Gets the IP Filter configuration for this bucket.

        :rtype: :class:`~google.cloud.storage.ip_filter.IPFilter` or ``NoneType``
        :returns:
            An ``IPFilter`` object representing the configuration, or ``None``
            if no filter is configured.
        """
        resource = self._properties.get(_IP_FILTER_PROPERTY)
        if resource:
            return IPFilter._from_api_resource(resource)
        return None

    @ip_filter.setter
    def ip_filter(self, value):
        """Set the IP Filter configuration from an ``IPFilter`` instance,
        a raw resource mapping, or ``None`` to clear it."""
        if value is None:
            self._patch_property(_IP_FILTER_PROPERTY, None)
        elif isinstance(value, IPFilter):
            self._patch_property(_IP_FILTER_PROPERTY, value._to_api_resource())
        else:
            self._patch_property(_IP_FILTER_PROPERTY, value)


class SoftDeletePolicy(dict):
    """Map a bucket's soft delete policy.

    See https://cloud.google.com/storage/docs/soft-delete

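    Example (a minimal sketch; assumes the bucket exposes this policy via its
    ``soft_delete_policy`` accessor, with an illustrative seven-day duration):

    .. code-block:: python

        bucket = client.get_bucket("my-bucket")
        bucket.soft_delete_policy.retention_duration_seconds = 7 * 24 * 60 * 60
        bucket.patch()  # persists the staged softDeletePolicy change
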
    :type bucket: :class:`Bucket`
    :param bucket: Bucket for which this instance is the policy.

    :type retention_duration_seconds: int
    :param retention_duration_seconds:
        (Optional) The period of time in seconds that soft-deleted objects in the bucket
        will be retained and cannot be permanently deleted.

    :type effective_time: :class:`datetime.datetime`
    :param effective_time:
        (Optional) When the bucket's soft delete policy is effective.
        This value should normally only be set by the back-end API.
    """

    def __init__(self, bucket, **kw):
        data = {}
        retention_duration_seconds = kw.get("retention_duration_seconds")
        data["retentionDurationSeconds"] = retention_duration_seconds

        effective_time = kw.get("effective_time")
        if effective_time is not None:
            effective_time = _datetime_to_rfc3339(effective_time)
        data["effectiveTime"] = effective_time

        super().__init__(data)
        self._bucket = bucket

    @classmethod
    def from_api_repr(cls, resource, bucket):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :type bucket: :class:`Bucket`
        :param bucket: Bucket for which this instance is the policy.

        :rtype: :class:`SoftDeletePolicy`
        :returns: Instance created from resource.
        """
        instance = cls(bucket)
        instance.update(resource)
        return instance

    @property
    def bucket(self):
        """Bucket for which this instance is the policy.

        :rtype: :class:`Bucket`
        :returns: the instance's bucket.
        """
        return self._bucket

    @property
    def retention_duration_seconds(self):
        """Get the retention duration of the bucket's soft delete policy.

        :rtype: int or ``NoneType``
        :returns: The period of time in seconds that soft-deleted objects in the bucket
                  will be retained and cannot be permanently deleted; or ``None`` if the
                  property is not set.
        """
        duration = self.get("retentionDurationSeconds")
        if duration is not None:
            return int(duration)

    @retention_duration_seconds.setter
    def retention_duration_seconds(self, value):
        """Set the retention duration of the bucket's soft delete policy.

        :type value: int
        :param value:
            The period of time in seconds that soft-deleted objects in the bucket
            will be retained and cannot be permanently deleted.
        """
        self["retentionDurationSeconds"] = value
        self.bucket._patch_property("softDeletePolicy", self)

    @property
    def effective_time(self):
        """Get the effective time of the bucket's soft delete policy.

        :rtype: datetime.datetime or ``NoneType``
        :returns: point-in-time at which the bucket's soft delete policy is
                  effective, or ``None`` if the property is not set.
        """
        timestamp = self.get("effectiveTime")
        if timestamp is not None:
            return _rfc3339_nanos_to_datetime(timestamp)


def _raise_if_len_differs(expected_len, **generation_match_args):
    """
    Raise an error if any generation match argument
    is set and its length differs from the given value.

    :type expected_len: int
    :param expected_len: Expected argument length in case it's set.

    :type generation_match_args: dict
    :param generation_match_args: Lists whose lengths must be checked.

    :raises: :exc:`ValueError` if any argument is set but has an unexpected length.
    """
    for name, value in generation_match_args.items():
        if value is not None and len(value) != expected_len:
            raise ValueError(f"'{name}' length must be the same as 'blobs' length")