# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Create / interact with Google Cloud Storage buckets."""

import base64
import copy
import datetime
import json
from urllib.parse import urlsplit
import warnings

from google.api_core import datetime_helpers
from google.cloud._helpers import _datetime_to_rfc3339
from google.cloud._helpers import _rfc3339_nanos_to_datetime
from google.cloud.exceptions import NotFound
from google.api_core.iam import Policy
from google.cloud.storage import _signing
from google.cloud.storage._helpers import _add_etag_match_headers
from google.cloud.storage._helpers import _add_generation_match_parameters
from google.cloud.storage._helpers import _NOW
from google.cloud.storage._helpers import _PropertyMixin
from google.cloud.storage._helpers import _UTC
from google.cloud.storage._helpers import _scalar_property
from google.cloud.storage._helpers import _validate_name
from google.cloud.storage._signing import generate_signed_url_v2
from google.cloud.storage._signing import generate_signed_url_v4
from google.cloud.storage._helpers import _bucket_bound_hostname_url
from google.cloud.storage._helpers import _virtual_hosted_style_base_url
from google.cloud.storage._opentelemetry_tracing import create_trace_span
from google.cloud.storage.acl import BucketACL
from google.cloud.storage.acl import DefaultObjectACL
from google.cloud.storage.blob import Blob
from google.cloud.storage.constants import _DEFAULT_TIMEOUT
from google.cloud.storage.constants import ARCHIVE_STORAGE_CLASS
from google.cloud.storage.constants import COLDLINE_STORAGE_CLASS
from google.cloud.storage.constants import DUAL_REGION_LOCATION_TYPE
from google.cloud.storage.constants import (
    DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS,
)
from google.cloud.storage.constants import MULTI_REGIONAL_LEGACY_STORAGE_CLASS
from google.cloud.storage.constants import MULTI_REGION_LOCATION_TYPE
from google.cloud.storage.constants import NEARLINE_STORAGE_CLASS
from google.cloud.storage.constants import PUBLIC_ACCESS_PREVENTION_INHERITED
from google.cloud.storage.constants import REGIONAL_LEGACY_STORAGE_CLASS
from google.cloud.storage.constants import REGION_LOCATION_TYPE
from google.cloud.storage.constants import STANDARD_STORAGE_CLASS
from google.cloud.storage.notification import BucketNotification
from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT
from google.cloud.storage.retry import DEFAULT_RETRY
from google.cloud.storage.retry import DEFAULT_RETRY_IF_GENERATION_SPECIFIED
from google.cloud.storage.retry import DEFAULT_RETRY_IF_ETAG_IN_JSON
from google.cloud.storage.retry import DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED


_UBLA_BPO_ENABLED_MESSAGE = (
    "Pass only one of 'uniform_bucket_level_access_enabled' / "
    "'bucket_policy_only_enabled' to 'IAMConfiguration'."
)
_BPO_ENABLED_MESSAGE = (
    "'IAMConfiguration.bucket_policy_only_enabled' is deprecated. "
    "Instead, use 'IAMConfiguration.uniform_bucket_level_access_enabled'."
)
_UBLA_BPO_LOCK_TIME_MESSAGE = (
    "Pass only one of 'uniform_bucket_level_access_lock_time' / "
    "'bucket_policy_only_lock_time' to 'IAMConfiguration'."
)
_BPO_LOCK_TIME_MESSAGE = (
    "'IAMConfiguration.bucket_policy_only_lock_time' is deprecated. "
    "Instead, use 'IAMConfiguration.uniform_bucket_level_access_lock_time'."
)
_LOCATION_SETTER_MESSAGE = (
    "Assignment to 'Bucket.location' is deprecated, as it is only "
    "valid before the bucket is created. Instead, pass the location "
    "to `Bucket.create`."
)
_FROM_STRING_MESSAGE = (
    "Bucket.from_string() is deprecated. Use Bucket.from_uri() instead."
)


def _blobs_page_start(iterator, page, response):
    """Grab prefixes after a :class:`~google.api_core.page_iterator.Page` started.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type page: :class:`~google.api_core.page_iterator.Page`
    :param page: The page that was just created.

    :type response: dict
    :param response: The JSON API response for a page of blobs.
    """
    page.prefixes = tuple(response.get("prefixes", ()))
    iterator.prefixes.update(page.prefixes)


def _item_to_blob(iterator, item):
    """Convert a JSON blob to the native object.

    .. note::

        This assumes that the ``bucket`` attribute has been
        added to the iterator after being created.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that has retrieved the item.

    :type item: dict
    :param item: An item to be converted to a blob.

    :rtype: :class:`.Blob`
    :returns: The next blob in the page.
    """
    name = item.get("name")
    blob = Blob(name, bucket=iterator.bucket)
    blob._set_properties(item)
    return blob


def _item_to_notification(iterator, item):
    """Convert a JSON notification resource to the native object.

    .. note::

        This assumes that the ``bucket`` attribute has been
        added to the iterator after being created.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that has retrieved the item.

    :type item: dict
    :param item: An item to be converted to a notification.

    :rtype: :class:`.BucketNotification`
    :returns: The next notification being iterated.
    """
    return BucketNotification.from_api_repr(item, bucket=iterator.bucket)


class LifecycleRuleConditions(dict):
    """Map the conditions for a single lifecycle rule on a bucket.

    See: https://cloud.google.com/storage/docs/lifecycle

    :type age: int
    :param age: (Optional) Apply rule action to items whose age, in days,
                exceeds this value.

    :type created_before: datetime.date
    :param created_before: (Optional) Apply rule action to items created
                           before this date.

    :type is_live: bool
    :param is_live: (Optional) If true, apply rule action to non-versioned
                    items, or to items with no newer versions. If false, apply
                    rule action to versioned items with at least one newer
                    version.

    :type matches_prefix: list(str)
    :param matches_prefix: (Optional) Apply rule action to items whose
                           names begin with any of the given prefixes.

    :type matches_storage_class: list(str), one or more of
                                 :attr:`Bucket.STORAGE_CLASSES`.
    :param matches_storage_class: (Optional) Apply rule action to items
                                  whose storage class matches this value.

    :type matches_suffix: list(str)
    :param matches_suffix: (Optional) Apply rule action to items whose
                           names end with any of the given suffixes.

    :type number_of_newer_versions: int
    :param number_of_newer_versions: (Optional) Apply rule action to versioned
                                     items having N newer versions.

    :type days_since_custom_time: int
    :param days_since_custom_time: (Optional) Apply rule action to items whose number of days
                                   elapsed since the custom timestamp exceeds this value. This
                                   condition is relevant only for versioned objects. The value
                                   of the field must be a non-negative integer. If it's zero,
                                   the object version becomes eligible for the lifecycle action
                                   as soon as its custom time is set.

    :type custom_time_before: :class:`datetime.date`
    :param custom_time_before: (Optional) Date object parsed from an RFC3339-valid date
                               (for example, 2019-03-16); apply rule action to items whose
                               custom time is before this date. This condition is relevant
                               only for versioned objects.

    :type days_since_noncurrent_time: int
    :param days_since_noncurrent_time: (Optional) Apply rule action to items whose number of days
                                       elapsed since the noncurrent timestamp exceeds this value.
                                       This condition is relevant only for versioned objects. The
                                       value of the field must be a non-negative integer. If it's
                                       zero, the object version becomes eligible for the lifecycle
                                       action as soon as it becomes noncurrent.

    :type noncurrent_time_before: :class:`datetime.date`
    :param noncurrent_time_before: (Optional) Date object parsed from an RFC3339-valid date
                                   (for example, 2019-03-16); apply rule action to items whose
                                   noncurrent time is before this date. This condition is
                                   relevant only for versioned objects.

    :raises ValueError: if no arguments are passed.
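
    Example (a minimal sketch; the condition values shown are arbitrary
    placeholders, not recommendations):

    .. code-block:: python

        from google.cloud.storage.bucket import LifecycleRuleConditions

        # Match objects older than 30 days whose names start with "tmp/".
        conditions = LifecycleRuleConditions(age=30, matches_prefix=["tmp/"])
        assert conditions.age == 30
        assert conditions.matches_prefix == ["tmp/"]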
214 """
215
216 def __init__(
217 self,
218 age=None,
219 created_before=None,
220 is_live=None,
221 matches_storage_class=None,
222 number_of_newer_versions=None,
223 days_since_custom_time=None,
224 custom_time_before=None,
225 days_since_noncurrent_time=None,
226 noncurrent_time_before=None,
227 matches_prefix=None,
228 matches_suffix=None,
229 _factory=False,
230 ):
231 conditions = {}
232
233 if age is not None:
234 conditions["age"] = age
235
236 if created_before is not None:
237 conditions["createdBefore"] = created_before.isoformat()
238
239 if is_live is not None:
240 conditions["isLive"] = is_live
241
242 if matches_storage_class is not None:
243 conditions["matchesStorageClass"] = matches_storage_class
244
245 if number_of_newer_versions is not None:
246 conditions["numNewerVersions"] = number_of_newer_versions
247
248 if days_since_custom_time is not None:
249 conditions["daysSinceCustomTime"] = days_since_custom_time
250
251 if custom_time_before is not None:
252 conditions["customTimeBefore"] = custom_time_before.isoformat()
253
254 if days_since_noncurrent_time is not None:
255 conditions["daysSinceNoncurrentTime"] = days_since_noncurrent_time
256
257 if noncurrent_time_before is not None:
258 conditions["noncurrentTimeBefore"] = noncurrent_time_before.isoformat()
259
260 if matches_prefix is not None:
261 conditions["matchesPrefix"] = matches_prefix
262
263 if matches_suffix is not None:
264 conditions["matchesSuffix"] = matches_suffix
265
266 if not _factory and not conditions:
267 raise ValueError("Supply at least one condition")
268
269 super(LifecycleRuleConditions, self).__init__(conditions)
270
271 @classmethod
272 def from_api_repr(cls, resource):
273 """Factory: construct instance from resource.
274
275 :type resource: dict
276 :param resource: mapping as returned from API call.
277
278 :rtype: :class:`LifecycleRuleConditions`
279 :returns: Instance created from resource.
280 """
281 instance = cls(_factory=True)
282 instance.update(resource)
283 return instance
284
285 @property
286 def age(self):
287 """Conditon's age value."""
288 return self.get("age")
289
290 @property
291 def created_before(self):
292 """Conditon's created_before value."""
293 before = self.get("createdBefore")
294 if before is not None:
295 return datetime_helpers.from_iso8601_date(before)
296
297 @property
298 def is_live(self):
299 """Conditon's 'is_live' value."""
300 return self.get("isLive")
301
302 @property
303 def matches_prefix(self):
304 """Conditon's 'matches_prefix' value."""
305 return self.get("matchesPrefix")
306
307 @property
308 def matches_storage_class(self):
309 """Conditon's 'matches_storage_class' value."""
310 return self.get("matchesStorageClass")
311
312 @property
313 def matches_suffix(self):
314 """Conditon's 'matches_suffix' value."""
315 return self.get("matchesSuffix")
316
317 @property
318 def number_of_newer_versions(self):
319 """Conditon's 'number_of_newer_versions' value."""
320 return self.get("numNewerVersions")
321
322 @property
323 def days_since_custom_time(self):
324 """Conditon's 'days_since_custom_time' value."""
325 return self.get("daysSinceCustomTime")
326
327 @property
328 def custom_time_before(self):
329 """Conditon's 'custom_time_before' value."""
330 before = self.get("customTimeBefore")
331 if before is not None:
332 return datetime_helpers.from_iso8601_date(before)
333
334 @property
335 def days_since_noncurrent_time(self):
336 """Conditon's 'days_since_noncurrent_time' value."""
337 return self.get("daysSinceNoncurrentTime")
338
339 @property
340 def noncurrent_time_before(self):
341 """Conditon's 'noncurrent_time_before' value."""
342 before = self.get("noncurrentTimeBefore")
343 if before is not None:
344 return datetime_helpers.from_iso8601_date(before)
345
346
347class LifecycleRuleDelete(dict):
348 """Map a lifecycle rule deleting matching items.
349
350 :type kw: dict
351 :params kw: arguments passed to :class:`LifecycleRuleConditions`.
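
    Example (a minimal sketch; ``my-bucket`` is a placeholder name, and
    ``lifecycle_rules`` is assumed to be the bucket property defined
    elsewhere in this module):

    .. code-block:: python

        from google.cloud import storage
        from google.cloud.storage.bucket import LifecycleRuleDelete

        client = storage.Client()
        bucket = client.get_bucket("my-bucket")

        # Delete objects more than a year old.
        rules = list(bucket.lifecycle_rules)
        rules.append(LifecycleRuleDelete(age=365))
        bucket.lifecycle_rules = rules
        bucket.patch()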
352 """
353
354 def __init__(self, **kw):
355 conditions = LifecycleRuleConditions(**kw)
356 rule = {"action": {"type": "Delete"}, "condition": dict(conditions)}
357 super().__init__(rule)
358
359 @classmethod
360 def from_api_repr(cls, resource):
361 """Factory: construct instance from resource.
362
363 :type resource: dict
364 :param resource: mapping as returned from API call.
365
366 :rtype: :class:`LifecycleRuleDelete`
367 :returns: Instance created from resource.
368 """
369 instance = cls(_factory=True)
370 instance.update(resource)
371 return instance
372
373
374class LifecycleRuleSetStorageClass(dict):
375 """Map a lifecycle rule updating storage class of matching items.
376
377 :type storage_class: str, one of :attr:`Bucket.STORAGE_CLASSES`.
378 :param storage_class: new storage class to assign to matching items.
379
380 :type kw: dict
381 :params kw: arguments passed to :class:`LifecycleRuleConditions`.
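
    Example (a minimal sketch; the 30-day threshold is an arbitrary
    placeholder):

    .. code-block:: python

        from google.cloud.storage.bucket import LifecycleRuleSetStorageClass
        from google.cloud.storage.constants import NEARLINE_STORAGE_CLASS

        # Move objects older than 30 days to Nearline storage.
        rule = LifecycleRuleSetStorageClass(NEARLINE_STORAGE_CLASS, age=30)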
382 """
383
384 def __init__(self, storage_class, **kw):
385 conditions = LifecycleRuleConditions(**kw)
386 rule = {
387 "action": {"type": "SetStorageClass", "storageClass": storage_class},
388 "condition": dict(conditions),
389 }
390 super().__init__(rule)
391
392 @classmethod
393 def from_api_repr(cls, resource):
394 """Factory: construct instance from resource.
395
396 :type resource: dict
397 :param resource: mapping as returned from API call.
398
399 :rtype: :class:`LifecycleRuleSetStorageClass`
400 :returns: Instance created from resource.
401 """
402 action = resource["action"]
403 instance = cls(action["storageClass"], _factory=True)
404 instance.update(resource)
405 return instance
406
407
408class LifecycleRuleAbortIncompleteMultipartUpload(dict):
409 """Map a rule aborting incomplete multipart uploads of matching items.
410
411 The "age" lifecycle condition is the only supported condition for this rule.
412
413 :type kw: dict
414 :params kw: arguments passed to :class:`LifecycleRuleConditions`.
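
    Example (a minimal sketch; per the note above, only the ``age``
    condition is supported, and the one-day value is a placeholder):

    .. code-block:: python

        from google.cloud.storage.bucket import (
            LifecycleRuleAbortIncompleteMultipartUpload,
        )

        # Abort multipart uploads still incomplete after one day.
        rule = LifecycleRuleAbortIncompleteMultipartUpload(age=1)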
415 """
416
417 def __init__(self, **kw):
418 conditions = LifecycleRuleConditions(**kw)
419 rule = {
420 "action": {"type": "AbortIncompleteMultipartUpload"},
421 "condition": dict(conditions),
422 }
423 super().__init__(rule)
424
425 @classmethod
426 def from_api_repr(cls, resource):
427 """Factory: construct instance from resource.
428
429 :type resource: dict
430 :param resource: mapping as returned from API call.
431
432 :rtype: :class:`LifecycleRuleAbortIncompleteMultipartUpload`
433 :returns: Instance created from resource.
434 """
435 instance = cls(_factory=True)
436 instance.update(resource)
437 return instance
438
439
440_default = object()
441
442
443class IAMConfiguration(dict):
444 """Map a bucket's IAM configuration.
445
446 :type bucket: :class:`Bucket`
447 :params bucket: Bucket for which this instance is the policy.
448
449 :type public_access_prevention: str
450 :params public_access_prevention:
451 (Optional) Whether the public access prevention policy is 'inherited' (default) or 'enforced'
452 See: https://cloud.google.com/storage/docs/public-access-prevention
453
454 :type uniform_bucket_level_access_enabled: bool
455 :params bucket_policy_only_enabled:
456 (Optional) Whether the IAM-only policy is enabled for the bucket.
457
458 :type uniform_bucket_level_access_locked_time: :class:`datetime.datetime`
459 :params uniform_bucket_level_locked_time:
460 (Optional) When the bucket's IAM-only policy was enabled.
461 This value should normally only be set by the back-end API.
462
463 :type bucket_policy_only_enabled: bool
464 :params bucket_policy_only_enabled:
465 Deprecated alias for :data:`uniform_bucket_level_access_enabled`.
466
467 :type bucket_policy_only_locked_time: :class:`datetime.datetime`
468 :params bucket_policy_only_locked_time:
469 Deprecated alias for :data:`uniform_bucket_level_access_locked_time`.
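
    Example (a minimal sketch; in application code this object is usually
    obtained from the bucket's ``iam_configuration`` property, defined
    elsewhere in this module, rather than constructed directly):

    .. code-block:: python

        from google.cloud import storage
        from google.cloud.storage.bucket import IAMConfiguration

        client = storage.Client()
        bucket = client.get_bucket("my-bucket")

        config = IAMConfiguration(
            bucket, uniform_bucket_level_access_enabled=True
        )
        assert config.uniform_bucket_level_access_enabled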
470 """
471
472 def __init__(
473 self,
474 bucket,
475 public_access_prevention=_default,
476 uniform_bucket_level_access_enabled=_default,
477 uniform_bucket_level_access_locked_time=_default,
478 bucket_policy_only_enabled=_default,
479 bucket_policy_only_locked_time=_default,
480 ):
481 if bucket_policy_only_enabled is not _default:
482 if uniform_bucket_level_access_enabled is not _default:
483 raise ValueError(_UBLA_BPO_ENABLED_MESSAGE)
484
485 warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2)
486 uniform_bucket_level_access_enabled = bucket_policy_only_enabled
487
488 if bucket_policy_only_locked_time is not _default:
489 if uniform_bucket_level_access_locked_time is not _default:
490 raise ValueError(_UBLA_BPO_LOCK_TIME_MESSAGE)
491
492 warnings.warn(_BPO_LOCK_TIME_MESSAGE, DeprecationWarning, stacklevel=2)
493 uniform_bucket_level_access_locked_time = bucket_policy_only_locked_time
494
495 if uniform_bucket_level_access_enabled is _default:
496 uniform_bucket_level_access_enabled = False
497
498 if public_access_prevention is _default:
499 public_access_prevention = PUBLIC_ACCESS_PREVENTION_INHERITED
500
501 data = {
502 "uniformBucketLevelAccess": {
503 "enabled": uniform_bucket_level_access_enabled
504 },
505 "publicAccessPrevention": public_access_prevention,
506 }
507 if uniform_bucket_level_access_locked_time is not _default:
508 data["uniformBucketLevelAccess"]["lockedTime"] = _datetime_to_rfc3339(
509 uniform_bucket_level_access_locked_time
510 )
511 super(IAMConfiguration, self).__init__(data)
512 self._bucket = bucket
513
514 @classmethod
515 def from_api_repr(cls, resource, bucket):
516 """Factory: construct instance from resource.
517
518 :type bucket: :class:`Bucket`
519 :params bucket: Bucket for which this instance is the policy.
520
521 :type resource: dict
522 :param resource: mapping as returned from API call.
523
524 :rtype: :class:`IAMConfiguration`
525 :returns: Instance created from resource.
526 """
527 instance = cls(bucket)
528 instance.update(resource)
529 return instance
530
531 @property
532 def bucket(self):
533 """Bucket for which this instance is the policy.
534
535 :rtype: :class:`Bucket`
536 :returns: the instance's bucket.
537 """
538 return self._bucket
539
540 @property
541 def public_access_prevention(self):
542 """Setting for public access prevention policy. Options are 'inherited' (default) or 'enforced'.
543
544 See: https://cloud.google.com/storage/docs/public-access-prevention
545
546 :rtype: string
547 :returns: the public access prevention status, either 'enforced' or 'inherited'.
548 """
549 return self["publicAccessPrevention"]
550
551 @public_access_prevention.setter
552 def public_access_prevention(self, value):
553 self["publicAccessPrevention"] = value
554 self.bucket._patch_property("iamConfiguration", self)
555
556 @property
557 def uniform_bucket_level_access_enabled(self):
558 """If set, access checks only use bucket-level IAM policies or above.
559
560 :rtype: bool
561 :returns: whether the bucket is configured to allow only IAM.
562 """
563 ubla = self.get("uniformBucketLevelAccess", {})
564 return ubla.get("enabled", False)
565
566 @uniform_bucket_level_access_enabled.setter
567 def uniform_bucket_level_access_enabled(self, value):
568 ubla = self.setdefault("uniformBucketLevelAccess", {})
569 ubla["enabled"] = bool(value)
570 self.bucket._patch_property("iamConfiguration", self)
571
572 @property
573 def uniform_bucket_level_access_locked_time(self):
574 """Deadline for changing :attr:`uniform_bucket_level_access_enabled` from true to false.
575
576 If the bucket's :attr:`uniform_bucket_level_access_enabled` is true, this property
577 is time time after which that setting becomes immutable.
578
579 If the bucket's :attr:`uniform_bucket_level_access_enabled` is false, this property
580 is ``None``.
581
582 :rtype: Union[:class:`datetime.datetime`, None]
583 :returns: (readonly) Time after which :attr:`uniform_bucket_level_access_enabled` will
584 be frozen as true.
585 """
586 ubla = self.get("uniformBucketLevelAccess", {})
587 stamp = ubla.get("lockedTime")
588 if stamp is not None:
589 stamp = _rfc3339_nanos_to_datetime(stamp)
590 return stamp
591
592 @property
593 def bucket_policy_only_enabled(self):
594 """Deprecated alias for :attr:`uniform_bucket_level_access_enabled`.
595
596 :rtype: bool
597 :returns: whether the bucket is configured to allow only IAM.
598 """
599 return self.uniform_bucket_level_access_enabled
600
601 @bucket_policy_only_enabled.setter
602 def bucket_policy_only_enabled(self, value):
603 warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2)
604 self.uniform_bucket_level_access_enabled = value
605
606 @property
607 def bucket_policy_only_locked_time(self):
608 """Deprecated alias for :attr:`uniform_bucket_level_access_locked_time`.
609
610 :rtype: Union[:class:`datetime.datetime`, None]
611 :returns:
612 (readonly) Time after which :attr:`bucket_policy_only_enabled` will
613 be frozen as true.
614 """
615 return self.uniform_bucket_level_access_locked_time
616
617
618class Bucket(_PropertyMixin):
619 """A class representing a Bucket on Cloud Storage.
620
621 :type client: :class:`google.cloud.storage.client.Client`
622 :param client: A client which holds credentials and project configuration
623 for the bucket (which requires a project).
624
625 :type name: str
626 :param name: The name of the bucket. Bucket names must start and end with a
627 number or letter.
628
629 :type user_project: str
630 :param user_project: (Optional) the project ID to be billed for API
631 requests made via this instance.
632
633 :type generation: int
634 :param generation: (Optional) If present, selects a specific revision of
635 this bucket.
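
    Example (a minimal sketch; ``my-bucket`` is a placeholder name):

    .. code-block:: python

        from google.cloud import storage
        from google.cloud.storage.bucket import Bucket

        client = storage.Client()
        # Instantiating a bucket handle makes no HTTP request.
        bucket = Bucket(client, name="my-bucket")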
636 """
637
638 _MAX_OBJECTS_FOR_ITERATION = 256
639 """Maximum number of existing objects allowed in iteration.
640
641 This is used in Bucket.delete() and Bucket.make_public().
642 """
643
644 STORAGE_CLASSES = (
645 STANDARD_STORAGE_CLASS,
646 NEARLINE_STORAGE_CLASS,
647 COLDLINE_STORAGE_CLASS,
648 ARCHIVE_STORAGE_CLASS,
649 MULTI_REGIONAL_LEGACY_STORAGE_CLASS, # legacy
650 REGIONAL_LEGACY_STORAGE_CLASS, # legacy
651 DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS, # legacy
652 )
653 """Allowed values for :attr:`storage_class`.
654
655 Default value is :attr:`STANDARD_STORAGE_CLASS`.
656
657 See
658 https://cloud.google.com/storage/docs/json_api/v1/buckets#storageClass
659 https://cloud.google.com/storage/docs/storage-classes
660 """
661
662 _LOCATION_TYPES = (
663 MULTI_REGION_LOCATION_TYPE,
664 REGION_LOCATION_TYPE,
665 DUAL_REGION_LOCATION_TYPE,
666 )
667 """Allowed values for :attr:`location_type`."""
668
669 def __init__(self, client, name=None, user_project=None, generation=None):
670 """
671 property :attr:`name`
672 Get the bucket's name.
673 """
674 name = _validate_name(name)
675 super(Bucket, self).__init__(name=name)
676 self._client = client
677 self._acl = BucketACL(self)
678 self._default_object_acl = DefaultObjectACL(self)
679 self._label_removals = set()
680 self._user_project = user_project
681
682 if generation is not None:
683 self._properties["generation"] = generation
684
685 def __repr__(self):
686 return f"<Bucket: {self.name}>"
687
688 @property
689 def client(self):
690 """The client bound to this bucket."""
691 return self._client
692
693 def _set_properties(self, value):
694 """Set the properties for the current object.
695
696 :type value: dict or :class:`google.cloud.storage.batch._FutureDict`
697 :param value: The properties to be set.
698 """
699 self._label_removals.clear()
700 return super(Bucket, self)._set_properties(value)
701
702 @property
703 def rpo(self):
704 """Get the RPO (Recovery Point Objective) of this bucket
705
706 See: https://cloud.google.com/storage/docs/managing-turbo-replication
707
708 "ASYNC_TURBO" or "DEFAULT"
709 :rtype: str
710 """
711 return self._properties.get("rpo")
712
713 @rpo.setter
714 def rpo(self, value):
715 """
716 Set the RPO (Recovery Point Objective) of this bucket.
717
718 See: https://cloud.google.com/storage/docs/managing-turbo-replication
719
720 :type value: str
721 :param value: "ASYNC_TURBO" or "DEFAULT"
722 """
723 self._patch_property("rpo", value)
724
725 @property
726 def user_project(self):
727 """Project ID to be billed for API requests made via this bucket.
728
729 If unset, API requests are billed to the bucket owner.
730
731 A user project is required for all operations on Requester Pays buckets.
732
733 See https://cloud.google.com/storage/docs/requester-pays#requirements for details.
734
735 :rtype: str
736 """
737 return self._user_project
738
739 @property
740 def generation(self):
741 """Retrieve the generation for the bucket.
742
743 :rtype: int or ``NoneType``
744 :returns: The generation of the bucket or ``None`` if the bucket's
745 resource has not been loaded from the server.
746 """
747 generation = self._properties.get("generation")
748 if generation is not None:
749 return int(generation)
750
751 @property
752 def soft_delete_time(self):
753 """If this bucket has been soft-deleted, returns the time at which it became soft-deleted.
754
755 :rtype: :class:`datetime.datetime` or ``NoneType``
756 :returns:
757 (readonly) The time that the bucket became soft-deleted.
758 Note this property is only set for soft-deleted buckets.
759 """
760 soft_delete_time = self._properties.get("softDeleteTime")
761 if soft_delete_time is not None:
762 return _rfc3339_nanos_to_datetime(soft_delete_time)
763
764 @property
765 def hard_delete_time(self):
766 """If this bucket has been soft-deleted, returns the time at which it will be permanently deleted.
767
768 :rtype: :class:`datetime.datetime` or ``NoneType``
769 :returns:
770 (readonly) The time that the bucket will be permanently deleted.
771 Note this property is only set for soft-deleted buckets.
772 """
773 hard_delete_time = self._properties.get("hardDeleteTime")
774 if hard_delete_time is not None:
775 return _rfc3339_nanos_to_datetime(hard_delete_time)
776
777 @property
778 def _query_params(self):
779 """Default query parameters."""
780 params = super()._query_params
781 return params
782
783 @classmethod
784 def from_uri(cls, uri, client=None):
785 """Get a constructor for bucket object by URI.
786
787 .. code-block:: python
788
789 from google.cloud import storage
790 from google.cloud.storage.bucket import Bucket
791 client = storage.Client()
792 bucket = Bucket.from_uri("gs://bucket", client=client)
793
794 :type uri: str
795 :param uri: The bucket uri pass to get bucket object.
796
797 :type client: :class:`~google.cloud.storage.client.Client` or
798 ``NoneType``
799 :param client: (Optional) The client to use. Application code should
800 *always* pass ``client``.
801
802 :rtype: :class:`google.cloud.storage.bucket.Bucket`
803 :returns: The bucket object created.
804 """
805 scheme, netloc, path, query, frag = urlsplit(uri)
806
807 if scheme != "gs":
808 raise ValueError("URI scheme must be gs")
809
810 return cls(client, name=netloc)
811
812 @classmethod
813 def from_string(cls, uri, client=None):
814 """Get a constructor for bucket object by URI.
815
816 .. note::
817 Deprecated alias for :meth:`from_uri`.
818
819 .. code-block:: python
820
821 from google.cloud import storage
822 from google.cloud.storage.bucket import Bucket
823 client = storage.Client()
824 bucket = Bucket.from_string("gs://bucket", client=client)
825
826 :type uri: str
827 :param uri: The bucket uri pass to get bucket object.
828
829 :type client: :class:`~google.cloud.storage.client.Client` or
830 ``NoneType``
831 :param client: (Optional) The client to use. Application code should
832 *always* pass ``client``.
833
834 :rtype: :class:`google.cloud.storage.bucket.Bucket`
835 :returns: The bucket object created.
836 """
837 warnings.warn(_FROM_STRING_MESSAGE, PendingDeprecationWarning, stacklevel=2)
838 return Bucket.from_uri(uri=uri, client=client)
839
840 def blob(
841 self,
842 blob_name,
843 chunk_size=None,
844 encryption_key=None,
845 kms_key_name=None,
846 generation=None,
847 ):
848 """Factory constructor for blob object.
849
850 .. note::
851 This will not make an HTTP request; it simply instantiates
852 a blob object owned by this bucket.
853
854 :type blob_name: str
855 :param blob_name: The name of the blob to be instantiated.
856
857 :type chunk_size: int
858 :param chunk_size: The size of a chunk of data whenever iterating
859 (in bytes). This must be a multiple of 256 KB per
860 the API specification.
861
862 :type encryption_key: bytes
863 :param encryption_key:
864 (Optional) 32 byte encryption key for customer-supplied encryption.
865
866 :type kms_key_name: str
867 :param kms_key_name:
868 (Optional) Resource name of KMS key used to encrypt blob's content.
869
870 :type generation: long
871 :param generation: (Optional) If present, selects a specific revision of
872 this object.
873
874 :rtype: :class:`google.cloud.storage.blob.Blob`
875 :returns: The blob object created.
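
        Example (a minimal sketch; the bucket and object names are
        placeholders):

        .. code-block:: python

            from google.cloud import storage

            client = storage.Client()
            bucket = client.get_bucket("my-bucket")
            # No HTTP request is made here.
            blob = bucket.blob("path/to/object.txt")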
876 """
877 return Blob(
878 name=blob_name,
879 bucket=self,
880 chunk_size=chunk_size,
881 encryption_key=encryption_key,
882 kms_key_name=kms_key_name,
883 generation=generation,
884 )
885
886 def notification(
887 self,
888 topic_name=None,
889 topic_project=None,
890 custom_attributes=None,
891 event_types=None,
892 blob_name_prefix=None,
893 payload_format=NONE_PAYLOAD_FORMAT,
894 notification_id=None,
895 ):
896 """Factory: create a notification resource for the bucket.
897
898 See: :class:`.BucketNotification` for parameters.
899
900 :rtype: :class:`.BucketNotification`
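
        Example (a minimal sketch; the bucket and topic names are
        placeholders, and the ``create()`` call on the returned object,
        defined in :mod:`google.cloud.storage.notification`, is what
        actually creates the server-side resource):

        .. code-block:: python

            from google.cloud import storage

            client = storage.Client()
            bucket = client.get_bucket("my-bucket")
            notification = bucket.notification(topic_name="my-topic")
            notification.create()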
901 """
902 return BucketNotification(
903 self,
904 topic_name=topic_name,
905 topic_project=topic_project,
906 custom_attributes=custom_attributes,
907 event_types=event_types,
908 blob_name_prefix=blob_name_prefix,
909 payload_format=payload_format,
910 notification_id=notification_id,
911 )
912
913 def exists(
914 self,
915 client=None,
916 timeout=_DEFAULT_TIMEOUT,
917 if_etag_match=None,
918 if_etag_not_match=None,
919 if_metageneration_match=None,
920 if_metageneration_not_match=None,
921 retry=DEFAULT_RETRY,
922 ):
923 """Determines whether or not this bucket exists.
924
925 If :attr:`user_project` is set, bills the API request to that project.
926
927 :type client: :class:`~google.cloud.storage.client.Client` or
928 ``NoneType``
929 :param client: (Optional) The client to use. If not passed, falls back
930 to the ``client`` stored on the current bucket.
931
932 :type timeout: float or tuple
933 :param timeout:
934 (Optional) The amount of time, in seconds, to wait
935 for the server response. See: :ref:`configuring_timeouts`
936
937 :type if_etag_match: Union[str, Set[str]]
938 :param if_etag_match: (Optional) Make the operation conditional on whether the
939 bucket's current ETag matches the given value.
940
941 :type if_etag_not_match: Union[str, Set[str]])
942 :param if_etag_not_match: (Optional) Make the operation conditional on whether the
943 bucket's current ETag does not match the given value.
944
945 :type if_metageneration_match: long
946 :param if_metageneration_match: (Optional) Make the operation conditional on whether the
947 bucket's current metageneration matches the given value.
948
949 :type if_metageneration_not_match: long
950 :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
951 bucket's current metageneration does not match the given value.
952
953 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
954 :param retry:
955 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
956
957 :rtype: bool
958 :returns: True if the bucket exists in Cloud Storage.
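
        Example (a minimal sketch; the bucket name is a placeholder):

        .. code-block:: python

            from google.cloud import storage

            client = storage.Client()
            bucket = client.bucket("my-bucket")
            if not bucket.exists():
                bucket.create()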
959 """
960 with create_trace_span(name="Storage.Bucket.exists"):
961 client = self._require_client(client)
962 # We only need the status code (200 or not) so we seek to
963 # minimize the returned payload.
964 query_params = {"fields": "name"}
965
966 if self.user_project is not None:
967 query_params["userProject"] = self.user_project
968
969 _add_generation_match_parameters(
970 query_params,
971 if_metageneration_match=if_metageneration_match,
972 if_metageneration_not_match=if_metageneration_not_match,
973 )
974
975 headers = {}
976 _add_etag_match_headers(
977 headers,
978 if_etag_match=if_etag_match,
979 if_etag_not_match=if_etag_not_match,
980 )
981
982 try:
983 # We intentionally pass `_target_object=None` since fields=name
984 # would limit the local properties.
985 client._get_resource(
986 self.path,
987 query_params=query_params,
988 headers=headers,
989 timeout=timeout,
990 retry=retry,
991 _target_object=None,
992 )
993 except NotFound:
994 # NOTE: This will not fail immediately in a batch. However, when
995 # Batch.finish() is called, the resulting `NotFound` will be
996 # raised.
997 return False
998 return True
999
1000 def create(
1001 self,
1002 client=None,
1003 project=None,
1004 location=None,
1005 predefined_acl=None,
1006 predefined_default_object_acl=None,
1007 enable_object_retention=False,
1008 timeout=_DEFAULT_TIMEOUT,
1009 retry=DEFAULT_RETRY,
1010 ):
1011 """Creates current bucket.
1012
1013 If the bucket already exists, will raise
1014 :class:`google.cloud.exceptions.Conflict`.
1015
1016 This implements "storage.buckets.insert".
1017
1018 If :attr:`user_project` is set, bills the API request to that project.
1019
1020 :type client: :class:`~google.cloud.storage.client.Client` or
1021 ``NoneType``
1022 :param client: (Optional) The client to use. If not passed, falls back
1023 to the ``client`` stored on the current bucket.
1024
1025 :type project: str
1026 :param project: (Optional) The project under which the bucket is to
1027 be created. If not passed, uses the project set on
1028 the client.
1029 :raises ValueError: if ``project`` is None and client's
1030 :attr:`project` is also None.
1031
1032 :type location: str
1033 :param location: (Optional) The location of the bucket. If not passed,
1034 the default location, US, will be used. See
1035 https://cloud.google.com/storage/docs/bucket-locations
1036
1037 :type predefined_acl: str
1038 :param predefined_acl:
1039 (Optional) Name of predefined ACL to apply to bucket. See:
1040 https://cloud.google.com/storage/docs/access-control/lists#predefined-acl
1041
1042 :type predefined_default_object_acl: str
1043 :param predefined_default_object_acl:
1044 (Optional) Name of predefined ACL to apply to bucket's objects. See:
1045 https://cloud.google.com/storage/docs/access-control/lists#predefined-acl
1046
1047 :type enable_object_retention: bool
1048 :param enable_object_retention:
1049 (Optional) Whether object retention should be enabled on this bucket. See:
1050 https://cloud.google.com/storage/docs/object-lock
1051
1052 :type timeout: float or tuple
1053 :param timeout:
1054 (Optional) The amount of time, in seconds, to wait
1055 for the server response. See: :ref:`configuring_timeouts`
1056
1057 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1058 :param retry:
1059 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
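
        Example (a minimal sketch; the bucket name and location are
        placeholders):

        .. code-block:: python

            from google.cloud import storage

            client = storage.Client()
            bucket = client.bucket("my-new-bucket")
            bucket.create(location="US")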
1060 """
1061 with create_trace_span(name="Storage.Bucket.create"):
1062 client = self._require_client(client)
1063 client.create_bucket(
1064 bucket_or_name=self,
1065 project=project,
1066 user_project=self.user_project,
1067 location=location,
1068 predefined_acl=predefined_acl,
1069 predefined_default_object_acl=predefined_default_object_acl,
1070 enable_object_retention=enable_object_retention,
1071 timeout=timeout,
1072 retry=retry,
1073 )
1074
1075 def update(
1076 self,
1077 client=None,
1078 timeout=_DEFAULT_TIMEOUT,
1079 if_metageneration_match=None,
1080 if_metageneration_not_match=None,
1081 retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
1082 ):
1083 """Sends all properties in a PUT request.
1084
1085 Updates the ``_properties`` with the response from the backend.
1086
1087 If :attr:`user_project` is set, bills the API request to that project.
1088
1089 :type client: :class:`~google.cloud.storage.client.Client` or
1090 ``NoneType``
1091 :param client: the client to use. If not passed, falls back to the
1092 ``client`` stored on the current object.
1093
1094 :type timeout: float or tuple
1095 :param timeout:
1096 (Optional) The amount of time, in seconds, to wait
1097 for the server response. See: :ref:`configuring_timeouts`
1098
1099 :type if_metageneration_match: long
1100 :param if_metageneration_match: (Optional) Make the operation conditional on whether the
1101 blob's current metageneration matches the given value.
1102
1103 :type if_metageneration_not_match: long
1104 :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
1105 blob's current metageneration does not match the given value.
1106
1107 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1108 :param retry:
1109 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1110 """
1111 with create_trace_span(name="Storage.Bucket.update"):
1112 super(Bucket, self).update(
1113 client=client,
1114 timeout=timeout,
1115 if_metageneration_match=if_metageneration_match,
1116 if_metageneration_not_match=if_metageneration_not_match,
1117 retry=retry,
1118 )
1119
1120 def reload(
1121 self,
1122 client=None,
1123 projection="noAcl",
1124 timeout=_DEFAULT_TIMEOUT,
1125 if_etag_match=None,
1126 if_etag_not_match=None,
1127 if_metageneration_match=None,
1128 if_metageneration_not_match=None,
1129 retry=DEFAULT_RETRY,
1130 soft_deleted=None,
1131 ):
1132 """Reload properties from Cloud Storage.
1133
1134 If :attr:`user_project` is set, bills the API request to that project.
1135
1136 :type client: :class:`~google.cloud.storage.client.Client` or
1137 ``NoneType``
1138 :param client: the client to use. If not passed, falls back to the
1139 ``client`` stored on the current object.
1140
1141 :type projection: str
1142 :param projection: (Optional) If used, must be 'full' or 'noAcl'.
1143 Defaults to ``'noAcl'``. Specifies the set of
1144 properties to return.
1145
1146 :type timeout: float or tuple
1147 :param timeout:
1148 (Optional) The amount of time, in seconds, to wait
1149 for the server response. See: :ref:`configuring_timeouts`
1150
1151 :type if_etag_match: Union[str, Set[str]]
1152 :param if_etag_match: (Optional) Make the operation conditional on whether the
1153 bucket's current ETag matches the given value.
1154
1155 :type if_etag_not_match: Union[str, Set[str]])
1156 :param if_etag_not_match: (Optional) Make the operation conditional on whether the
1157 bucket's current ETag does not match the given value.
1158
1159 :type if_metageneration_match: long
1160 :param if_metageneration_match: (Optional) Make the operation conditional on whether the
1161 bucket's current metageneration matches the given value.
1162
1163 :type if_metageneration_not_match: long
1164 :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
1165 bucket's current metageneration does not match the given value.
1166
1167 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1168 :param retry:
1169 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1170
1171 :type soft_deleted: bool
1172 :param soft_deleted: (Optional) If True, looks for a soft-deleted
1173 bucket. Will only return the bucket metadata if the bucket exists
1174 and is in a soft-deleted state. The bucket ``generation`` must be
1175 set if ``soft_deleted`` is set to True.
1176 See: https://cloud.google.com/storage/docs/soft-delete
1177 """
1178 with create_trace_span(name="Storage.Bucket.reload"):
1179 super(Bucket, self).reload(
1180 client=client,
1181 projection=projection,
1182 timeout=timeout,
1183 if_etag_match=if_etag_match,
1184 if_etag_not_match=if_etag_not_match,
1185 if_metageneration_match=if_metageneration_match,
1186 if_metageneration_not_match=if_metageneration_not_match,
1187 retry=retry,
1188 soft_deleted=soft_deleted,
1189 )
1190
1191 def patch(
1192 self,
1193 client=None,
1194 timeout=_DEFAULT_TIMEOUT,
1195 if_metageneration_match=None,
1196 if_metageneration_not_match=None,
1197 retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
1198 ):
1199 """Sends all changed properties in a PATCH request.
1200
1201 Updates the ``_properties`` with the response from the backend.
1202
1203 If :attr:`user_project` is set, bills the API request to that project.
1204
1205 :type client: :class:`~google.cloud.storage.client.Client` or
1206 ``NoneType``
1207 :param client: the client to use. If not passed, falls back to the
1208 ``client`` stored on the current object.
1209
1210 :type timeout: float or tuple
1211 :param timeout:
1212 (Optional) The amount of time, in seconds, to wait
1213 for the server response. See: :ref:`configuring_timeouts`
1214
1215 :type if_metageneration_match: long
1216 :param if_metageneration_match: (Optional) Make the operation conditional on whether the
1217 blob's current metageneration matches the given value.
1218
1219 :type if_metageneration_not_match: long
1220 :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
1221 blob's current metageneration does not match the given value.
1222
1223 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1224 :param retry:
1225 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
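
        Example (a minimal sketch; ``env`` is a placeholder label key, and
        ``labels`` is assumed to be the bucket property defined elsewhere
        in this module):

        .. code-block:: python

            from google.cloud import storage

            client = storage.Client()
            bucket = client.get_bucket("my-bucket")

            # Removing a label locally, then patching, deletes it server-side.
            labels = bucket.labels
            labels.pop("env", None)
            bucket.labels = labels
            bucket.patch()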
1226 """
1227 with create_trace_span(name="Storage.Bucket.patch"):
1228 # Special case: For buckets, it is possible that labels are being
1229 # removed; this requires special handling.
1230 if self._label_removals:
1231 self._changes.add("labels")
1232 self._properties.setdefault("labels", {})
1233 for removed_label in self._label_removals:
1234 self._properties["labels"][removed_label] = None
1235
1236 # Call the superclass method.
1237 super(Bucket, self).patch(
1238 client=client,
1239 if_metageneration_match=if_metageneration_match,
1240 if_metageneration_not_match=if_metageneration_not_match,
1241 timeout=timeout,
1242 retry=retry,
1243 )
1244
1245 @property
1246 def acl(self):
1247 """Create our ACL on demand."""
1248 return self._acl
1249
1250 @property
1251 def default_object_acl(self):
1252 """Create our defaultObjectACL on demand."""
1253 return self._default_object_acl
1254
1255 @staticmethod
1256 def path_helper(bucket_name):
1257 """Relative URL path for a bucket.
1258
1259 :type bucket_name: str
1260 :param bucket_name: The bucket name in the path.
1261
1262 :rtype: str
1263 :returns: The relative URL path for ``bucket_name``.
1264 """
1265 return "/b/" + bucket_name
1266
1267 @property
1268 def path(self):
1269 """The URL path to this bucket."""
1270 if not self.name:
1271 raise ValueError("Cannot determine path without bucket name.")
1272
1273 return self.path_helper(self.name)
1274
1275 def get_blob(
1276 self,
1277 blob_name,
1278 client=None,
1279 encryption_key=None,
1280 generation=None,
1281 if_etag_match=None,
1282 if_etag_not_match=None,
1283 if_generation_match=None,
1284 if_generation_not_match=None,
1285 if_metageneration_match=None,
1286 if_metageneration_not_match=None,
1287 timeout=_DEFAULT_TIMEOUT,
1288 retry=DEFAULT_RETRY,
1289 soft_deleted=None,
1290 **kwargs,
1291 ):
1292 """Get a blob object by name.
1293
1294 See a [code sample](https://cloud.google.com/storage/docs/samples/storage-get-metadata#storage_get_metadata-python)
1295 on how to retrieve metadata of an object.
1296
1297 If :attr:`user_project` is set, bills the API request to that project.
1298
1299 :type blob_name: str
1300 :param blob_name: The name of the blob to retrieve.
1301
1302 :type client: :class:`~google.cloud.storage.client.Client` or
1303 ``NoneType``
1304 :param client: (Optional) The client to use. If not passed, falls back
1305 to the ``client`` stored on the current bucket.
1306
1307 :type encryption_key: bytes
1308 :param encryption_key:
1309 (Optional) 32 byte encryption key for customer-supplied encryption.
1310 See
1311 https://cloud.google.com/storage/docs/encryption#customer-supplied.
1312
1313 :type generation: long
1314 :param generation:
1315 (Optional) If present, selects a specific revision of this object.
1316
1317 :type if_etag_match: Union[str, Set[str]]
1318 :param if_etag_match:
1319 (Optional) See :ref:`using-if-etag-match`
1320
1321 :type if_etag_not_match: Union[str, Set[str]]
1322 :param if_etag_not_match:
1323 (Optional) See :ref:`using-if-etag-not-match`
1324
1325 :type if_generation_match: long
1326 :param if_generation_match:
1327 (Optional) See :ref:`using-if-generation-match`
1328
1329 :type if_generation_not_match: long
1330 :param if_generation_not_match:
1331 (Optional) See :ref:`using-if-generation-not-match`
1332
1333 :type if_metageneration_match: long
1334 :param if_metageneration_match:
1335 (Optional) See :ref:`using-if-metageneration-match`
1336
1337 :type if_metageneration_not_match: long
1338 :param if_metageneration_not_match:
1339 (Optional) See :ref:`using-if-metageneration-not-match`
1340
1341 :type timeout: float or tuple
1342 :param timeout:
1343 (Optional) The amount of time, in seconds, to wait
1344 for the server response. See: :ref:`configuring_timeouts`
1345
1346 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1347 :param retry:
1348 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1349
1350 :type soft_deleted: bool
1351 :param soft_deleted:
1352 (Optional) If True, looks for a soft-deleted object. Will only return
1353 the object metadata if the object exists and is in a soft-deleted state.
1354 Object ``generation`` is required if ``soft_deleted`` is set to True.
1355 See: https://cloud.google.com/storage/docs/soft-delete
1356
1357 :param kwargs: Keyword arguments to pass to the
1358 :class:`~google.cloud.storage.blob.Blob` constructor.
1359
1360 :rtype: :class:`google.cloud.storage.blob.Blob` or None
1361 :returns: The blob object if it exists, otherwise None.
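
        Example (a minimal sketch; the bucket and object names are
        placeholders):

        .. code-block:: python

            from google.cloud import storage

            client = storage.Client()
            bucket = client.get_bucket("my-bucket")
            blob = bucket.get_blob("path/to/object.txt")
            if blob is not None:
                print(blob.size)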
1362 """
1363 with create_trace_span(name="Storage.Bucket.getBlob"):
1364 blob = Blob(
1365 bucket=self,
1366 name=blob_name,
1367 encryption_key=encryption_key,
1368 generation=generation,
1369 **kwargs,
1370 )
1371 try:
1372 # NOTE: This will not fail immediately in a batch. However, when
1373 # Batch.finish() is called, the resulting `NotFound` will be
1374 # raised.
1375 blob.reload(
1376 client=client,
1377 timeout=timeout,
1378 if_etag_match=if_etag_match,
1379 if_etag_not_match=if_etag_not_match,
1380 if_generation_match=if_generation_match,
1381 if_generation_not_match=if_generation_not_match,
1382 if_metageneration_match=if_metageneration_match,
1383 if_metageneration_not_match=if_metageneration_not_match,
1384 retry=retry,
1385 soft_deleted=soft_deleted,
1386 )
1387 except NotFound:
1388 return None
1389 else:
1390 return blob
1391
1392 def list_blobs(
1393 self,
1394 max_results=None,
1395 page_token=None,
1396 prefix=None,
1397 delimiter=None,
1398 start_offset=None,
1399 end_offset=None,
1400 include_trailing_delimiter=None,
1401 versions=None,
1402 projection="noAcl",
1403 fields=None,
1404 client=None,
1405 timeout=_DEFAULT_TIMEOUT,
1406 retry=DEFAULT_RETRY,
1407 match_glob=None,
1408 include_folders_as_prefixes=None,
1409 soft_deleted=None,
1410 page_size=None,
1411 ):
1412 """Return an iterator used to find blobs in the bucket.
1413
1414 If :attr:`user_project` is set, bills the API request to that project.
1415
1416 :type max_results: int
1417 :param max_results:
1418 (Optional) The maximum number of blobs to return.
1419
1420 :type page_token: str
1421 :param page_token:
1422 (Optional) If present, return the next batch of blobs, using the
1423 value, which must correspond to the ``nextPageToken`` value
1424 returned in the previous response. Deprecated: use the ``pages``
1425 property of the returned iterator instead of manually passing the
1426 token.
1427
1428 :type prefix: str
1429 :param prefix: (Optional) Prefix used to filter blobs.
1430
1431 :type delimiter: str
1432 :param delimiter: (Optional) Delimiter, used with ``prefix`` to
1433 emulate hierarchy.
1434
1435 :type start_offset: str
1436 :param start_offset:
1437 (Optional) Filter results to objects whose names are
1438 lexicographically equal to or after ``startOffset``. If
1439 ``endOffset`` is also set, the objects listed will have names
1440 between ``startOffset`` (inclusive) and ``endOffset`` (exclusive).
1441
1442 :type end_offset: str
1443 :param end_offset:
1444 (Optional) Filter results to objects whose names are
1445 lexicographically before ``endOffset``. If ``startOffset`` is also
1446 set, the objects listed will have names between ``startOffset``
1447 (inclusive) and ``endOffset`` (exclusive).
1448
1449 :type include_trailing_delimiter: boolean
1450 :param include_trailing_delimiter:
1451 (Optional) If true, objects that end in exactly one instance of
1452 ``delimiter`` will have their metadata included in ``items`` in
1453 addition to ``prefixes``.
1454
1455 :type versions: bool
1456 :param versions: (Optional) Whether object versions should be returned
1457 as separate blobs.
1458
1459 :type projection: str
1460 :param projection: (Optional) If used, must be 'full' or 'noAcl'.
1461 Defaults to ``'noAcl'``. Specifies the set of
1462 properties to return.
1463
1464 :type fields: str
1465 :param fields:
1466 (Optional) Selector specifying which fields to include
1467 in a partial response. Must be a list of fields. For
1468 example to get a partial response with just the next
1469 page token and the name and language of each blob returned:
1470 ``'items(name,contentLanguage),nextPageToken'``.
1471 See: https://cloud.google.com/storage/docs/json_api/v1/parameters#fields
1472
1473 :type client: :class:`~google.cloud.storage.client.Client`
1474 :param client: (Optional) The client to use. If not passed, falls back
1475 to the ``client`` stored on the current bucket.
1476
1477 :type timeout: float or tuple
1478 :param timeout:
1479 (Optional) The amount of time, in seconds, to wait
1480 for the server response. See: :ref:`configuring_timeouts`
1481
1482 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1483 :param retry:
1484 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1485
1486 :type match_glob: str
1487 :param match_glob:
1488 (Optional) A glob pattern used to filter results (for example, foo*bar).
1489 The string value must be UTF-8 encoded. See:
1490 https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-object-glob
1491
1492 :type include_folders_as_prefixes: bool
1493 (Optional) If true, includes Folders and Managed Folders in the set of
1494 ``prefixes`` returned by the query. Only applicable if ``delimiter`` is set to /.
1495 See: https://cloud.google.com/storage/docs/managed-folders
1496
1497 :type soft_deleted: bool
1498 :param soft_deleted:
1499 (Optional) If true, only soft-deleted objects will be listed as distinct results in order of increasing
1500 generation number. This parameter can only be used successfully if the bucket has a soft delete policy.
1501 Note ``soft_deleted`` and ``versions`` cannot be set to True simultaneously. See:
1502 https://cloud.google.com/storage/docs/soft-delete
1503
1504 :type page_size: int
1505 :param page_size:
1506 (Optional) Maximum number of blobs to return in each page.
1507 Defaults to a value set by the API.
1508
1509 :rtype: :class:`~google.api_core.page_iterator.Iterator`
1510 :returns: Iterator of all :class:`~google.cloud.storage.blob.Blob`
1511 in this bucket matching the arguments.
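
        Example (a minimal sketch; the bucket name and prefix are
        placeholders, and ``iterator.prefixes`` is assumed to be populated
        by the page-start hook defined at the top of this module):

        .. code-block:: python

            from google.cloud import storage

            client = storage.Client()
            bucket = client.get_bucket("my-bucket")

            # Emulate a directory listing of "data/".
            iterator = bucket.list_blobs(prefix="data/", delimiter="/")
            for blob in iterator:
                print(blob.name)
            print(iterator.prefixes)  # "subdirectories" under the prefix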
1512 """
1513 with create_trace_span(name="Storage.Bucket.listBlobs"):
1514 client = self._require_client(client)
1515 return client.list_blobs(
1516 self,
1517 max_results=max_results,
1518 page_token=page_token,
1519 prefix=prefix,
1520 delimiter=delimiter,
1521 start_offset=start_offset,
1522 end_offset=end_offset,
1523 include_trailing_delimiter=include_trailing_delimiter,
1524 versions=versions,
1525 projection=projection,
1526 fields=fields,
1527 page_size=page_size,
1528 timeout=timeout,
1529 retry=retry,
1530 match_glob=match_glob,
1531 include_folders_as_prefixes=include_folders_as_prefixes,
1532 soft_deleted=soft_deleted,
1533 )
1534
1535 def list_notifications(
1536 self, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY
1537 ):
1538 """List Pub / Sub notifications for this bucket.
1539
1540 See:
1541 https://cloud.google.com/storage/docs/json_api/v1/notifications/list
1542
1543 If :attr:`user_project` is set, bills the API request to that project.
1544
1545 :type client: :class:`~google.cloud.storage.client.Client` or
1546 ``NoneType``
1547 :param client: (Optional) The client to use. If not passed, falls back
1548 to the ``client`` stored on the current bucket.
1549 :type timeout: float or tuple
1550 :param timeout:
1551 (Optional) The amount of time, in seconds, to wait
1552 for the server response. See: :ref:`configuring_timeouts`
1553
1554 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1555 :param retry:
1556 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1557
1558 :rtype: list of :class:`.BucketNotification`
1559 :returns: notification instances
1560 """
1561 with create_trace_span(name="Storage.Bucket.listNotifications"):
1562 client = self._require_client(client)
1563 path = self.path + "/notificationConfigs"
1564 iterator = client._list_resource(
1565 path,
1566 _item_to_notification,
1567 timeout=timeout,
1568 retry=retry,
1569 )
1570 iterator.bucket = self
1571 return iterator
1572
1573 def get_notification(
1574 self,
1575 notification_id,
1576 client=None,
1577 timeout=_DEFAULT_TIMEOUT,
1578 retry=DEFAULT_RETRY,
1579 ):
1580 """Get Pub / Sub notification for this bucket.
1581
1582 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/notifications/get)
1583 and a [code sample](https://cloud.google.com/storage/docs/samples/storage-print-pubsub-bucket-notification#storage_print_pubsub_bucket_notification-python).
1584
1585 If :attr:`user_project` is set, bills the API request to that project.
1586
1587 :type notification_id: str
1588 :param notification_id: The notification id to retrieve the notification configuration.
1589
1590 :type client: :class:`~google.cloud.storage.client.Client` or
1591 ``NoneType``
1592 :param client: (Optional) The client to use. If not passed, falls back
1593 to the ``client`` stored on the current bucket.
1594 :type timeout: float or tuple
1595 :param timeout:
1596 (Optional) The amount of time, in seconds, to wait
1597 for the server response. See: :ref:`configuring_timeouts`
1598
1599 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1600 :param retry:
1601 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1602
1603 :rtype: :class:`.BucketNotification`
1604 :returns: notification instance.
1605 """
1606 with create_trace_span(name="Storage.Bucket.getNotification"):
1607 notification = self.notification(notification_id=notification_id)
1608 notification.reload(client=client, timeout=timeout, retry=retry)
1609 return notification
1610
1611 def delete(
1612 self,
1613 force=False,
1614 client=None,
1615 if_metageneration_match=None,
1616 if_metageneration_not_match=None,
1617 timeout=_DEFAULT_TIMEOUT,
1618 retry=DEFAULT_RETRY,
1619 ):
1620 """Delete this bucket.
1621
1622 The bucket **must** be empty in order to submit a delete request. If
1623 ``force=True`` is passed, this will first attempt to delete all the
1624 objects / blobs in the bucket (i.e. try to empty the bucket).
1625
1626 If the bucket doesn't exist, this will raise
1627 :class:`google.cloud.exceptions.NotFound`. If the bucket is not empty
1628 (and ``force=False``), will raise :class:`google.cloud.exceptions.Conflict`.
1629
1630 If ``force=True`` and the bucket contains more than 256 objects / blobs
1631 this will cowardly refuse to delete the objects (or the bucket). This
1632 is to prevent accidental bucket deletion and to prevent extremely long
1633 runtime of this method. Also note that ``force=True`` is not supported
1634 in a ``Batch`` context.
1635
1636 If :attr:`user_project` is set, bills the API request to that project.
1637
1638 :type force: bool
1639 :param force: If True, empties the bucket's objects then deletes it.
1640
1641 :type client: :class:`~google.cloud.storage.client.Client` or
1642 ``NoneType``
1643 :param client: (Optional) The client to use. If not passed, falls back
1644 to the ``client`` stored on the current bucket.
1645
1646 :type if_metageneration_match: long
1647         :param if_metageneration_match: (Optional) Make the operation conditional on whether the
1648             bucket's current metageneration matches the given value.
1649 
1650         :type if_metageneration_not_match: long
1651         :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
1652             bucket's current metageneration does not match the given value.
1653
1654 :type timeout: float or tuple
1655 :param timeout:
1656 (Optional) The amount of time, in seconds, to wait
1657 for the server response. See: :ref:`configuring_timeouts`
1658
1659 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1660 :param retry:
1661 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1662
1663 :raises: :class:`ValueError` if ``force`` is ``True`` and the bucket
1664 contains more than 256 objects / blobs.
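
        Illustrative example (a minimal sketch; assumes ``bucket`` is an
        existing :class:`Bucket` whose contents may safely be deleted):

        >>> bucket.delete(force=True)  # empties the bucket, then deletes it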
1665 """
1666 with create_trace_span(name="Storage.Bucket.delete"):
1667 client = self._require_client(client)
1668 query_params = {}
1669
1670 if self.user_project is not None:
1671 query_params["userProject"] = self.user_project
1672
1673 _add_generation_match_parameters(
1674 query_params,
1675 if_metageneration_match=if_metageneration_match,
1676 if_metageneration_not_match=if_metageneration_not_match,
1677 )
1678 if force:
1679 blobs = list(
1680 self.list_blobs(
1681 max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
1682 client=client,
1683 timeout=timeout,
1684 retry=retry,
1685 versions=True,
1686 )
1687 )
1688 if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
1689 message = (
1690 "Refusing to delete bucket with more than "
1691 "%d objects. If you actually want to delete "
1692 "this bucket, please delete the objects "
1693 "yourself before calling Bucket.delete()."
1694 ) % (self._MAX_OBJECTS_FOR_ITERATION,)
1695 raise ValueError(message)
1696
1697 # Ignore 404 errors on delete.
1698 self.delete_blobs(
1699 blobs,
1700 on_error=lambda blob: None,
1701 client=client,
1702 timeout=timeout,
1703 retry=retry,
1704 preserve_generation=True,
1705 )
1706
1707 # We intentionally pass `_target_object=None` since a DELETE
1708 # request has no response value (whether in a standard request or
1709 # in a batch request).
1710 client._delete_resource(
1711 self.path,
1712 query_params=query_params,
1713 timeout=timeout,
1714 retry=retry,
1715 _target_object=None,
1716 )
1717
1718 def delete_blob(
1719 self,
1720 blob_name,
1721 client=None,
1722 generation=None,
1723 if_generation_match=None,
1724 if_generation_not_match=None,
1725 if_metageneration_match=None,
1726 if_metageneration_not_match=None,
1727 timeout=_DEFAULT_TIMEOUT,
1728 retry=DEFAULT_RETRY,
1729 ):
1730 """Deletes a blob from the current bucket.
1731
1732 If :attr:`user_project` is set, bills the API request to that project.
1733
1734 :type blob_name: str
1735 :param blob_name: A blob name to delete.
1736
1737 :type client: :class:`~google.cloud.storage.client.Client` or
1738 ``NoneType``
1739 :param client: (Optional) The client to use. If not passed, falls back
1740 to the ``client`` stored on the current bucket.
1741
1742 :type generation: long
1743 :param generation: (Optional) If present, permanently deletes a specific
1744 revision of this object.
1745
1746 :type if_generation_match: long
1747 :param if_generation_match:
1748 (Optional) See :ref:`using-if-generation-match`
1749
1750 :type if_generation_not_match: long
1751 :param if_generation_not_match:
1752 (Optional) See :ref:`using-if-generation-not-match`
1753
1754 :type if_metageneration_match: long
1755 :param if_metageneration_match:
1756 (Optional) See :ref:`using-if-metageneration-match`
1757
1758 :type if_metageneration_not_match: long
1759 :param if_metageneration_not_match:
1760 (Optional) See :ref:`using-if-metageneration-not-match`
1761
1762 :type timeout: float or tuple
1763 :param timeout:
1764 (Optional) The amount of time, in seconds, to wait
1765 for the server response. See: :ref:`configuring_timeouts`
1766
1767 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1768 :param retry: (Optional) How to retry the RPC. A None value will disable
1769 retries. A google.api_core.retry.Retry value will enable retries,
1770 and the object will define retriable response codes and errors and
1771 configure backoff and timeout options.
1772
1773 A google.cloud.storage.retry.ConditionalRetryPolicy value wraps a
1774 Retry object and activates it only if certain conditions are met.
1775 This class exists to provide safe defaults for RPC calls that are
1776 not technically safe to retry normally (due to potential data
1777 duplication or other side-effects) but become safe to retry if a
1778 condition such as if_generation_match is set.
1779
1780 See the retry.py source code and docstrings in this package
1781 (google.cloud.storage.retry) for information on retry types and how
1782 to configure them.
1783
1784         :raises: :class:`google.cloud.exceptions.NotFound` if the blob
1785             isn't found. To suppress the exception, use
1786             :meth:`delete_blobs` instead, passing a no-op
1787             ``on_error`` callback.
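
        Illustrative example (a minimal sketch; the blob name is a
        placeholder), suppressing the exception noted above:

        >>> from google.cloud.exceptions import NotFound
        >>> try:
        ...     bucket.delete_blob("my-object")
        ... except NotFound:
        ...     pass  # the object was already gone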
1788 """
1789 with create_trace_span(name="Storage.Bucket.deleteBlob"):
1790 client = self._require_client(client)
1791 blob = Blob(blob_name, bucket=self, generation=generation)
1792
1793 query_params = copy.deepcopy(blob._query_params)
1794 _add_generation_match_parameters(
1795 query_params,
1796 if_generation_match=if_generation_match,
1797 if_generation_not_match=if_generation_not_match,
1798 if_metageneration_match=if_metageneration_match,
1799 if_metageneration_not_match=if_metageneration_not_match,
1800 )
1801 # We intentionally pass `_target_object=None` since a DELETE
1802 # request has no response value (whether in a standard request or
1803 # in a batch request).
1804 client._delete_resource(
1805 blob.path,
1806 query_params=query_params,
1807 timeout=timeout,
1808 retry=retry,
1809 _target_object=None,
1810 )
1811
1812 def delete_blobs(
1813 self,
1814 blobs,
1815 on_error=None,
1816 client=None,
1817 preserve_generation=False,
1818 timeout=_DEFAULT_TIMEOUT,
1819 if_generation_match=None,
1820 if_generation_not_match=None,
1821 if_metageneration_match=None,
1822 if_metageneration_not_match=None,
1823 retry=DEFAULT_RETRY,
1824 ):
1825 """Deletes a list of blobs from the current bucket.
1826
1827 Uses :meth:`delete_blob` to delete each individual blob.
1828
1829 By default, any generation information in the list of blobs is ignored, and the
1830 live versions of all blobs are deleted. Set `preserve_generation` to True
1831 if blob generation should instead be propagated from the list of blobs.
1832
1833 If :attr:`user_project` is set, bills the API request to that project.
1834
1835 :type blobs: list
1836         :param blobs: A list of :class:`~google.cloud.storage.blob.Blob` objects
1837             or blob names to delete.
1838
1839 :type on_error: callable
1840 :param on_error: (Optional) Takes single argument: ``blob``.
1841 Called once for each blob raising
1842 :class:`~google.cloud.exceptions.NotFound`;
1843 otherwise, the exception is propagated.
1844 Note that ``on_error`` is not supported in a ``Batch`` context.
1845
1846 :type client: :class:`~google.cloud.storage.client.Client`
1847 :param client: (Optional) The client to use. If not passed, falls back
1848 to the ``client`` stored on the current bucket.
1849
1850 :type preserve_generation: bool
1851         :param preserve_generation: (Optional) If True, deletes only the generation specified on each
1852             blob object, instead of the live version. Only :class:`~google.cloud.storage.blob.Blob`
1853             objects can have their generation set in this way.
1854             Default: False.
1855
1856 :type if_generation_match: list of long
1857 :param if_generation_match:
1858 (Optional) See :ref:`using-if-generation-match`
1859             The list must match ``blobs`` item-to-item.
1861
1862 :type if_generation_not_match: list of long
1863 :param if_generation_not_match:
1864 (Optional) See :ref:`using-if-generation-not-match`
1865 The list must match ``blobs`` item-to-item.
1866
1867 :type if_metageneration_match: list of long
1868 :param if_metageneration_match:
1869 (Optional) See :ref:`using-if-metageneration-match`
1870 The list must match ``blobs`` item-to-item.
1871
1872 :type if_metageneration_not_match: list of long
1873 :param if_metageneration_not_match:
1874 (Optional) See :ref:`using-if-metageneration-not-match`
1875 The list must match ``blobs`` item-to-item.
1876
1877 :type timeout: float or tuple
1878 :param timeout:
1879 (Optional) The amount of time, in seconds, to wait
1880 for the server response. See: :ref:`configuring_timeouts`
1881
1882 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1883 :param retry: (Optional) How to retry the RPC. A None value will disable
1884 retries. A google.api_core.retry.Retry value will enable retries,
1885 and the object will define retriable response codes and errors and
1886 configure backoff and timeout options.
1887
1888 A google.cloud.storage.retry.ConditionalRetryPolicy value wraps a
1889 Retry object and activates it only if certain conditions are met.
1890 This class exists to provide safe defaults for RPC calls that are
1891 not technically safe to retry normally (due to potential data
1892 duplication or other side-effects) but become safe to retry if a
1893 condition such as if_generation_match is set.
1894
1895 See the retry.py source code and docstrings in this package
1896 (google.cloud.storage.retry) for information on retry types and how
1897 to configure them.
1898
1899 :raises: :class:`~google.cloud.exceptions.NotFound` (if
1900 `on_error` is not passed).
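
        Illustrative example (a minimal sketch; blob names are placeholders),
        ignoring objects that are already gone:

        >>> bucket.delete_blobs(
        ...     ["my-object-1", "my-object-2"],
        ...     on_error=lambda blob: None,  # suppress NotFound per blob
        ... )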
1901 """
1902 with create_trace_span(name="Storage.Bucket.deleteBlobs"):
1903 _raise_if_len_differs(
1904 len(blobs),
1905 if_generation_match=if_generation_match,
1906 if_generation_not_match=if_generation_not_match,
1907 if_metageneration_match=if_metageneration_match,
1908 if_metageneration_not_match=if_metageneration_not_match,
1909 )
1910 if_generation_match = iter(if_generation_match or [])
1911 if_generation_not_match = iter(if_generation_not_match or [])
1912 if_metageneration_match = iter(if_metageneration_match or [])
1913 if_metageneration_not_match = iter(if_metageneration_not_match or [])
1914
1915 for blob in blobs:
1916 try:
1917 blob_name = blob
1918 generation = None
1919 if not isinstance(blob_name, str):
1920 blob_name = blob.name
1921 generation = blob.generation if preserve_generation else None
1922
1923 self.delete_blob(
1924 blob_name,
1925 client=client,
1926 generation=generation,
1927 if_generation_match=next(if_generation_match, None),
1928 if_generation_not_match=next(if_generation_not_match, None),
1929 if_metageneration_match=next(if_metageneration_match, None),
1930 if_metageneration_not_match=next(
1931 if_metageneration_not_match, None
1932 ),
1933 timeout=timeout,
1934 retry=retry,
1935 )
1936 except NotFound:
1937 if on_error is not None:
1938 on_error(blob)
1939 else:
1940 raise
1941
1942 def copy_blob(
1943 self,
1944 blob,
1945 destination_bucket,
1946 new_name=None,
1947 client=None,
1948 preserve_acl=True,
1949 source_generation=None,
1950 if_generation_match=None,
1951 if_generation_not_match=None,
1952 if_metageneration_match=None,
1953 if_metageneration_not_match=None,
1954 if_source_generation_match=None,
1955 if_source_generation_not_match=None,
1956 if_source_metageneration_match=None,
1957 if_source_metageneration_not_match=None,
1958 timeout=_DEFAULT_TIMEOUT,
1959 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
1960 ):
1961 """Copy the given blob to the given bucket, optionally with a new name.
1962
1963 If :attr:`user_project` is set, bills the API request to that project.
1964
1965 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/objects/copy)
1966 and a [code sample](https://cloud.google.com/storage/docs/samples/storage-copy-file#storage_copy_file-python).
1967
1968 :type blob: :class:`google.cloud.storage.blob.Blob`
1969 :param blob: The blob to be copied.
1970
1971 :type destination_bucket: :class:`google.cloud.storage.bucket.Bucket`
1972 :param destination_bucket: The bucket into which the blob should be
1973 copied.
1974
1975 :type new_name: str
1976 :param new_name: (Optional) The new name for the copied file.
1977
1978 :type client: :class:`~google.cloud.storage.client.Client` or
1979 ``NoneType``
1980 :param client: (Optional) The client to use. If not passed, falls back
1981 to the ``client`` stored on the current bucket.
1982
1983 :type preserve_acl: bool
1984 :param preserve_acl: DEPRECATED. This argument is not functional!
1985 (Optional) Copies ACL from old blob to new blob.
1986 Default: True.
1987 Note that ``preserve_acl`` is not supported in a
1988 ``Batch`` context.
1989
1990 :type source_generation: long
1991 :param source_generation: (Optional) The generation of the blob to be
1992 copied.
1993
1994 :type if_generation_match: long
1995 :param if_generation_match:
1996 (Optional) See :ref:`using-if-generation-match`
1997 Note that the generation to be matched is that of the
1998 ``destination`` blob.
1999
2000 :type if_generation_not_match: long
2001 :param if_generation_not_match:
2002 (Optional) See :ref:`using-if-generation-not-match`
2003 Note that the generation to be matched is that of the
2004 ``destination`` blob.
2005
2006 :type if_metageneration_match: long
2007 :param if_metageneration_match:
2008 (Optional) See :ref:`using-if-metageneration-match`
2009 Note that the metageneration to be matched is that of the
2010 ``destination`` blob.
2011
2012 :type if_metageneration_not_match: long
2013 :param if_metageneration_not_match:
2014 (Optional) See :ref:`using-if-metageneration-not-match`
2015 Note that the metageneration to be matched is that of the
2016 ``destination`` blob.
2017
2018 :type if_source_generation_match: long
2019 :param if_source_generation_match:
2020 (Optional) Makes the operation conditional on whether the source
2021 object's generation matches the given value.
2022
2023 :type if_source_generation_not_match: long
2024 :param if_source_generation_not_match:
2025 (Optional) Makes the operation conditional on whether the source
2026 object's generation does not match the given value.
2027
2028 :type if_source_metageneration_match: long
2029 :param if_source_metageneration_match:
2030 (Optional) Makes the operation conditional on whether the source
2031 object's current metageneration matches the given value.
2032
2033 :type if_source_metageneration_not_match: long
2034 :param if_source_metageneration_not_match:
2035 (Optional) Makes the operation conditional on whether the source
2036 object's current metageneration does not match the given value.
2037
2038 :type timeout: float or tuple
2039 :param timeout:
2040 (Optional) The amount of time, in seconds, to wait
2041 for the server response. See: :ref:`configuring_timeouts`
2042
2043 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
2044 :param retry:
2045 (Optional) How to retry the RPC.
2046 The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, a conditional retry
2047 policy which will only enable retries if ``if_generation_match`` or ``generation``
2048 is set, in order to ensure requests are idempotent before retrying them.
2049 Change the value to ``DEFAULT_RETRY`` or another `google.api_core.retry.Retry` object
2050 to enable retries regardless of generation precondition setting.
2051 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout).
2052
2053 :rtype: :class:`google.cloud.storage.blob.Blob`
2054 :returns: The new Blob.
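
        Illustrative example (a minimal sketch; assumes ``client`` is an
        authenticated :class:`~google.cloud.storage.client.Client` and
        ``bucket`` an existing :class:`Bucket`; all names are placeholders):

        >>> destination_bucket = client.bucket("destination-bucket")
        >>> blob = bucket.blob("my-object")
        >>> new_blob = bucket.copy_blob(blob, destination_bucket, "copied-object")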
2055 """
2056 with create_trace_span(name="Storage.Bucket.copyBlob"):
2057 client = self._require_client(client)
2058 query_params = {}
2059
2060 if self.user_project is not None:
2061 query_params["userProject"] = self.user_project
2062
2063 if source_generation is not None:
2064 query_params["sourceGeneration"] = source_generation
2065
2066 _add_generation_match_parameters(
2067 query_params,
2068 if_generation_match=if_generation_match,
2069 if_generation_not_match=if_generation_not_match,
2070 if_metageneration_match=if_metageneration_match,
2071 if_metageneration_not_match=if_metageneration_not_match,
2072 if_source_generation_match=if_source_generation_match,
2073 if_source_generation_not_match=if_source_generation_not_match,
2074 if_source_metageneration_match=if_source_metageneration_match,
2075 if_source_metageneration_not_match=if_source_metageneration_not_match,
2076 )
2077
2078 if new_name is None:
2079 new_name = blob.name
2080
2081 new_blob = Blob(bucket=destination_bucket, name=new_name)
2082 api_path = blob.path + "/copyTo" + new_blob.path
2083 copy_result = client._post_resource(
2084 api_path,
2085 None,
2086 query_params=query_params,
2087 timeout=timeout,
2088 retry=retry,
2089 _target_object=new_blob,
2090 )
2091
2092 if not preserve_acl:
2093 new_blob.acl.save(acl={}, client=client, timeout=timeout)
2094
2095 new_blob._set_properties(copy_result)
2096 return new_blob
2097
2098 def rename_blob(
2099 self,
2100 blob,
2101 new_name,
2102 client=None,
2103 if_generation_match=None,
2104 if_generation_not_match=None,
2105 if_metageneration_match=None,
2106 if_metageneration_not_match=None,
2107 if_source_generation_match=None,
2108 if_source_generation_not_match=None,
2109 if_source_metageneration_match=None,
2110 if_source_metageneration_not_match=None,
2111 timeout=_DEFAULT_TIMEOUT,
2112 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
2113 ):
2114 """Rename the given blob using copy and delete operations.
2115
2116 If :attr:`user_project` is set, bills the API request to that project.
2117
2118 Effectively, copies blob to the same bucket with a new name, then
2119 deletes the blob.
2120
2121 .. warning::
2122
2123 This method will first duplicate the data and then delete the
2124 old blob. This means that with very large objects renaming
2125             could be a (temporarily) very costly or very slow operation.
2126 If you need more control over the copy and deletion, instead
2127 use ``google.cloud.storage.blob.Blob.copy_to`` and
2128 ``google.cloud.storage.blob.Blob.delete`` directly.
2129
2130 Also note that this method is not fully supported in a
2131 ``Batch`` context.
2132
2133 :type blob: :class:`google.cloud.storage.blob.Blob`
2134 :param blob: The blob to be renamed.
2135
2136 :type new_name: str
2137 :param new_name: The new name for this blob.
2138
2139 :type client: :class:`~google.cloud.storage.client.Client` or
2140 ``NoneType``
2141 :param client: (Optional) The client to use. If not passed, falls back
2142 to the ``client`` stored on the current bucket.
2143
2144 :type if_generation_match: long
2145 :param if_generation_match:
2146 (Optional) See :ref:`using-if-generation-match`
2147 Note that the generation to be matched is that of the
2148 ``destination`` blob.
2149
2150 :type if_generation_not_match: long
2151 :param if_generation_not_match:
2152 (Optional) See :ref:`using-if-generation-not-match`
2153 Note that the generation to be matched is that of the
2154 ``destination`` blob.
2155
2156 :type if_metageneration_match: long
2157 :param if_metageneration_match:
2158 (Optional) See :ref:`using-if-metageneration-match`
2159 Note that the metageneration to be matched is that of the
2160 ``destination`` blob.
2161
2162 :type if_metageneration_not_match: long
2163 :param if_metageneration_not_match:
2164 (Optional) See :ref:`using-if-metageneration-not-match`
2165 Note that the metageneration to be matched is that of the
2166 ``destination`` blob.
2167
2168 :type if_source_generation_match: long
2169 :param if_source_generation_match:
2170 (Optional) Makes the operation conditional on whether the source
2171 object's generation matches the given value. Also used in the
2172 (implied) delete request.
2173
2174 :type if_source_generation_not_match: long
2175 :param if_source_generation_not_match:
2176 (Optional) Makes the operation conditional on whether the source
2177 object's generation does not match the given value. Also used in
2178 the (implied) delete request.
2179
2180 :type if_source_metageneration_match: long
2181 :param if_source_metageneration_match:
2182 (Optional) Makes the operation conditional on whether the source
2183 object's current metageneration matches the given value. Also used
2184 in the (implied) delete request.
2185
2186 :type if_source_metageneration_not_match: long
2187 :param if_source_metageneration_not_match:
2188 (Optional) Makes the operation conditional on whether the source
2189 object's current metageneration does not match the given value.
2190 Also used in the (implied) delete request.
2191
2192 :type timeout: float or tuple
2193 :param timeout:
2194 (Optional) The amount of time, in seconds, to wait
2195 for the server response. See: :ref:`configuring_timeouts`
2196
2197 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
2198 :param retry:
2199 (Optional) How to retry the RPC.
2200 The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, a conditional retry
2201 policy which will only enable retries if ``if_generation_match`` or ``generation``
2202 is set, in order to ensure requests are idempotent before retrying them.
2203 Change the value to ``DEFAULT_RETRY`` or another `google.api_core.retry.Retry` object
2204 to enable retries regardless of generation precondition setting.
2205 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout).
2206
2207 :rtype: :class:`Blob`
2208 :returns: The newly-renamed blob.
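
        Illustrative example (a minimal sketch; assumes ``bucket`` is an
        existing :class:`Bucket` and the blob names are placeholders):

        >>> blob = bucket.blob("old-name")
        >>> new_blob = bucket.rename_blob(blob, "new-name")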
2209 """
2210 with create_trace_span(name="Storage.Bucket.renameBlob"):
2211 same_name = blob.name == new_name
2212
2213 new_blob = self.copy_blob(
2214 blob,
2215 self,
2216 new_name,
2217 client=client,
2218 timeout=timeout,
2219 if_generation_match=if_generation_match,
2220 if_generation_not_match=if_generation_not_match,
2221 if_metageneration_match=if_metageneration_match,
2222 if_metageneration_not_match=if_metageneration_not_match,
2223 if_source_generation_match=if_source_generation_match,
2224 if_source_generation_not_match=if_source_generation_not_match,
2225 if_source_metageneration_match=if_source_metageneration_match,
2226 if_source_metageneration_not_match=if_source_metageneration_not_match,
2227 retry=retry,
2228 )
2229
2230 if not same_name:
2231 blob.delete(
2232 client=client,
2233 timeout=timeout,
2234 if_generation_match=if_source_generation_match,
2235 if_generation_not_match=if_source_generation_not_match,
2236 if_metageneration_match=if_source_metageneration_match,
2237 if_metageneration_not_match=if_source_metageneration_not_match,
2238 retry=retry,
2239 )
2240 return new_blob
2241
2242 def move_blob(
2243 self,
2244 blob,
2245 new_name,
2246 client=None,
2247 if_generation_match=None,
2248 if_generation_not_match=None,
2249 if_metageneration_match=None,
2250 if_metageneration_not_match=None,
2251 if_source_generation_match=None,
2252 if_source_generation_not_match=None,
2253 if_source_metageneration_match=None,
2254 if_source_metageneration_not_match=None,
2255 timeout=_DEFAULT_TIMEOUT,
2256 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
2257 ):
2258 """Move a blob to a new name within a single HNS bucket.
2259
2260         *This feature is currently only supported for HNS (Hierarchical
2261 Namespace) buckets.*
2262
2263 If :attr:`user_project` is set on the bucket, bills the API request to that project.
2264
2265 :type blob: :class:`google.cloud.storage.blob.Blob`
2266 :param blob: The blob to be renamed.
2267
2268 :type new_name: str
2269 :param new_name: The new name for this blob.
2270
2271 :type client: :class:`~google.cloud.storage.client.Client` or
2272 ``NoneType``
2273 :param client: (Optional) The client to use. If not passed, falls back
2274 to the ``client`` stored on the current bucket.
2275
2276 :type if_generation_match: int
2277 :param if_generation_match:
2278 (Optional) See :ref:`using-if-generation-match`
2279 Note that the generation to be matched is that of the
2280 ``destination`` blob.
2281
2282 :type if_generation_not_match: int
2283 :param if_generation_not_match:
2284 (Optional) See :ref:`using-if-generation-not-match`
2285 Note that the generation to be matched is that of the
2286 ``destination`` blob.
2287
2288 :type if_metageneration_match: int
2289 :param if_metageneration_match:
2290 (Optional) See :ref:`using-if-metageneration-match`
2291 Note that the metageneration to be matched is that of the
2292 ``destination`` blob.
2293
2294 :type if_metageneration_not_match: int
2295 :param if_metageneration_not_match:
2296 (Optional) See :ref:`using-if-metageneration-not-match`
2297 Note that the metageneration to be matched is that of the
2298 ``destination`` blob.
2299
2300 :type if_source_generation_match: int
2301 :param if_source_generation_match:
2302 (Optional) Makes the operation conditional on whether the source
2303 object's generation matches the given value.
2304
2305 :type if_source_generation_not_match: int
2306 :param if_source_generation_not_match:
2307 (Optional) Makes the operation conditional on whether the source
2308 object's generation does not match the given value.
2309
2310 :type if_source_metageneration_match: int
2311 :param if_source_metageneration_match:
2312 (Optional) Makes the operation conditional on whether the source
2313 object's current metageneration matches the given value.
2314
2315 :type if_source_metageneration_not_match: int
2316 :param if_source_metageneration_not_match:
2317 (Optional) Makes the operation conditional on whether the source
2318 object's current metageneration does not match the given value.
2319
2320 :type timeout: float or tuple
2321 :param timeout:
2322 (Optional) The amount of time, in seconds, to wait
2323 for the server response. See: :ref:`configuring_timeouts`
2324
2325 :type retry: google.api_core.retry.Retry
2326 :param retry:
2327 (Optional) How to retry the RPC.
2328 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout).
2329
2330 :rtype: :class:`Blob`
2331 :returns: The newly-moved blob.
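
        Illustrative example (a minimal sketch; assumes ``bucket`` is an
        existing HNS-enabled :class:`Bucket` and the names are placeholders):

        >>> blob = bucket.blob("old-name")
        >>> new_blob = bucket.move_blob(blob, "new-name")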
2332 """
2333 with create_trace_span(name="Storage.Bucket.moveBlob"):
2334 client = self._require_client(client)
2335 query_params = {}
2336
2337 if self.user_project is not None:
2338 query_params["userProject"] = self.user_project
2339
2340 _add_generation_match_parameters(
2341 query_params,
2342 if_generation_match=if_generation_match,
2343 if_generation_not_match=if_generation_not_match,
2344 if_metageneration_match=if_metageneration_match,
2345 if_metageneration_not_match=if_metageneration_not_match,
2346 if_source_generation_match=if_source_generation_match,
2347 if_source_generation_not_match=if_source_generation_not_match,
2348 if_source_metageneration_match=if_source_metageneration_match,
2349 if_source_metageneration_not_match=if_source_metageneration_not_match,
2350 )
2351
2352 new_blob = Blob(bucket=self, name=new_name)
2353 api_path = blob.path + "/moveTo/o/" + new_blob.name
2354 move_result = client._post_resource(
2355 api_path,
2356 None,
2357 query_params=query_params,
2358 timeout=timeout,
2359 retry=retry,
2360 _target_object=new_blob,
2361 )
2362
2363 new_blob._set_properties(move_result)
2364 return new_blob
2365
2366 def restore_blob(
2367 self,
2368 blob_name,
2369 client=None,
2370 generation=None,
2371 copy_source_acl=None,
2372 projection=None,
2373 if_generation_match=None,
2374 if_generation_not_match=None,
2375 if_metageneration_match=None,
2376 if_metageneration_not_match=None,
2377 timeout=_DEFAULT_TIMEOUT,
2378 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
2379 ):
2380 """Restores a soft-deleted object.
2381
2382 If :attr:`user_project` is set on the bucket, bills the API request to that project.
2383
2384 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/objects/restore)
2385
2386 :type blob_name: str
2387 :param blob_name: The name of the blob to be restored.
2388
2389 :type client: :class:`~google.cloud.storage.client.Client`
2390 :param client: (Optional) The client to use. If not passed, falls back
2391 to the ``client`` stored on the current bucket.
2392
2393 :type generation: int
2394 :param generation: Selects the specific revision of the object.
2395
2396 :type copy_source_acl: bool
2397 :param copy_source_acl: (Optional) If true, copy the soft-deleted object's access controls.
2398
2399 :type projection: str
2400 :param projection: (Optional) Specifies the set of properties to return.
2401 If used, must be 'full' or 'noAcl'.
2402
2403 :type if_generation_match: long
2404 :param if_generation_match:
2405 (Optional) See :ref:`using-if-generation-match`
2406
2407 :type if_generation_not_match: long
2408 :param if_generation_not_match:
2409 (Optional) See :ref:`using-if-generation-not-match`
2410
2411 :type if_metageneration_match: long
2412 :param if_metageneration_match:
2413 (Optional) See :ref:`using-if-metageneration-match`
2414
2415 :type if_metageneration_not_match: long
2416 :param if_metageneration_not_match:
2417 (Optional) See :ref:`using-if-metageneration-not-match`
2418
2419 :type timeout: float or tuple
2420 :param timeout:
2421 (Optional) The amount of time, in seconds, to wait
2422 for the server response. See: :ref:`configuring_timeouts`
2423
2424 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
2425 :param retry:
2426 (Optional) How to retry the RPC.
2427             The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, a
2428             conditional retry policy under which only restore operations with
2429             ``if_generation_match`` or ``generation`` set will be retried.
2430
2431 Users can configure non-default retry behavior. A ``None`` value will
2432 disable retries. A ``DEFAULT_RETRY`` value will enable retries
2433 even if restore operations are not guaranteed to be idempotent.
2434 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout).
2435
2436 :rtype: :class:`google.cloud.storage.blob.Blob`
2437 :returns: The restored Blob.
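
        Illustrative example (a minimal sketch; the blob name and generation
        are placeholders for a soft-deleted object's name and generation):

        >>> restored = bucket.restore_blob("my-object", generation=123456)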
2438 """
2439 with create_trace_span(name="Storage.Bucket.restore_blob"):
2440 client = self._require_client(client)
2441 query_params = {}
2442
2443 if self.user_project is not None:
2444 query_params["userProject"] = self.user_project
2445 if generation is not None:
2446 query_params["generation"] = generation
2447 if copy_source_acl is not None:
2448 query_params["copySourceAcl"] = copy_source_acl
2449 if projection is not None:
2450 query_params["projection"] = projection
2451
2452 _add_generation_match_parameters(
2453 query_params,
2454 if_generation_match=if_generation_match,
2455 if_generation_not_match=if_generation_not_match,
2456 if_metageneration_match=if_metageneration_match,
2457 if_metageneration_not_match=if_metageneration_not_match,
2458 )
2459
2460 blob = Blob(bucket=self, name=blob_name)
2461 api_response = client._post_resource(
2462 f"{blob.path}/restore",
2463 None,
2464 query_params=query_params,
2465 timeout=timeout,
2466 retry=retry,
2467 )
2468 blob._set_properties(api_response)
2469 return blob
2470
2471 @property
2472 def cors(self):
2473 """Retrieve or set CORS policies configured for this bucket.
2474
2475 See http://www.w3.org/TR/cors/ and
2476 https://cloud.google.com/storage/docs/json_api/v1/buckets
2477
2478 .. note::
2479
2480 The getter for this property returns a list which contains
2481 *copies* of the bucket's CORS policy mappings. Mutating the list
2482 or one of its dicts has no effect unless you then re-assign the
2483 dict via the setter. E.g.:
2484
2485 >>> policies = bucket.cors
2486 >>> policies.append({'origin': '/foo', ...})
2487 >>> policies[1]['maxAgeSeconds'] = 3600
2488 >>> del policies[0]
2489 >>> bucket.cors = policies
2490 >>> bucket.update()
2491
2492 :setter: Set CORS policies for this bucket.
2493 :getter: Gets the CORS policies for this bucket.
2494
2495 :rtype: list of dictionaries
2496 :returns: A sequence of mappings describing each CORS policy.
2497 """
2498 return [copy.deepcopy(policy) for policy in self._properties.get("cors", ())]
2499
2500 @cors.setter
2501 def cors(self, entries):
2502 """Set CORS policies configured for this bucket.
2503
2504 See http://www.w3.org/TR/cors/ and
2505 https://cloud.google.com/storage/docs/json_api/v1/buckets
2506
2507 :type entries: list of dictionaries
2508 :param entries: A sequence of mappings describing each CORS policy.
2509 """
2510 self._patch_property("cors", entries)
2511
2512 default_event_based_hold = _scalar_property("defaultEventBasedHold")
2513     """Are uploaded objects automatically placed under an event-based hold?
2514 
2515     If True, uploaded objects will be placed under an event-based hold to
2516     be released at a future time. When released, an object will then begin
2517     the retention period determined by the policy retention period for the
2518     object's bucket.
2519
2520 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2521
2522 If the property is not set locally, returns ``None``.
2523
2524 :rtype: bool or ``NoneType``
2525 """
2526
2527 @property
2528 def default_kms_key_name(self):
2529 """Retrieve / set default KMS encryption key for objects in the bucket.
2530
2531 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2532
2533 :setter: Set default KMS encryption key for items in this bucket.
2534 :getter: Get default KMS encryption key for items in this bucket.
2535
2536 :rtype: str
2537 :returns: Default KMS encryption key, or ``None`` if not set.
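
        Illustrative example (a minimal sketch; the key resource name is a
        placeholder):

        >>> bucket.default_kms_key_name = (
        ...     "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key"
        ... )
        >>> bucket.patch()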
2538 """
2539 encryption_config = self._properties.get("encryption", {})
2540 return encryption_config.get("defaultKmsKeyName")
2541
2542 @default_kms_key_name.setter
2543 def default_kms_key_name(self, value):
2544 """Set default KMS encryption key for objects in the bucket.
2545
2546 :type value: str or None
2547 :param value: new KMS key name (None to clear any existing key).
2548 """
2549 encryption_config = self._properties.get("encryption", {})
2550 encryption_config["defaultKmsKeyName"] = value
2551 self._patch_property("encryption", encryption_config)
2552
2553 @property
2554 def labels(self):
2555 """Retrieve or set labels assigned to this bucket.
2556
2557 See
2558 https://cloud.google.com/storage/docs/json_api/v1/buckets#labels
2559
2560 .. note::
2561
2562 The getter for this property returns a dict which is a *copy*
2563 of the bucket's labels. Mutating that dict has no effect unless
2564 you then re-assign the dict via the setter. E.g.:
2565
2566 >>> labels = bucket.labels
2567 >>> labels['new_key'] = 'some-label'
2568 >>> del labels['old_key']
2569 >>> bucket.labels = labels
2570 >>> bucket.update()
2571
2572 :setter: Set labels for this bucket.
2573 :getter: Gets the labels for this bucket.
2574
2575 :rtype: :class:`dict`
2576 :returns: Name-value pairs (string->string) labelling the bucket.
2577 """
2578 labels = self._properties.get("labels")
2579 if labels is None:
2580 return {}
2581 return copy.deepcopy(labels)
2582
2583 @labels.setter
2584 def labels(self, mapping):
2585 """Set labels assigned to this bucket.
2586
2587 See
2588 https://cloud.google.com/storage/docs/json_api/v1/buckets#labels
2589
2590 :type mapping: :class:`dict`
2591 :param mapping: Name-value pairs (string->string) labelling the bucket.
2592 """
2593 # If any labels have been expressly removed, we need to track this
2594 # so that a future .patch() call can do the correct thing.
2595         existing = set(self.labels.keys())
2596         incoming = set(mapping.keys())
2597 self._label_removals = self._label_removals.union(existing.difference(incoming))
2598 mapping = {k: str(v) for k, v in mapping.items()}
2599
2600 # Actually update the labels on the object.
2601 self._patch_property("labels", copy.deepcopy(mapping))
2602
2603 @property
2604 def etag(self):
2605 """Retrieve the ETag for the bucket.
2606
2607 See https://tools.ietf.org/html/rfc2616#section-3.11 and
2608 https://cloud.google.com/storage/docs/json_api/v1/buckets
2609
2610 :rtype: str or ``NoneType``
2611 :returns: The bucket etag or ``None`` if the bucket's
2612 resource has not been loaded from the server.
2613 """
2614 return self._properties.get("etag")
2615
2616 @property
2617 def id(self):
2618 """Retrieve the ID for the bucket.
2619
2620 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2621
2622 :rtype: str or ``NoneType``
2623 :returns: The ID of the bucket or ``None`` if the bucket's
2624 resource has not been loaded from the server.
2625 """
2626 return self._properties.get("id")
2627
2628 @property
2629 def iam_configuration(self):
2630 """Retrieve IAM configuration for this bucket.
2631
2632 :rtype: :class:`IAMConfiguration`
2633 :returns: an instance for managing the bucket's IAM configuration.
2634 """
2635 info = self._properties.get("iamConfiguration", {})
2636 return IAMConfiguration.from_api_repr(info, self)
2637
2638 @property
2639 def soft_delete_policy(self):
2640 """Retrieve the soft delete policy for this bucket.
2641
2642 See https://cloud.google.com/storage/docs/soft-delete
2643
2644 :rtype: :class:`SoftDeletePolicy`
2645 :returns: an instance for managing the bucket's soft delete policy.
2646 """
2647 policy = self._properties.get("softDeletePolicy", {})
2648 return SoftDeletePolicy.from_api_repr(policy, self)
2649
2650 @property
2651 def lifecycle_rules(self):
2652 """Retrieve or set lifecycle rules configured for this bucket.
2653
2654 See https://cloud.google.com/storage/docs/lifecycle and
2655 https://cloud.google.com/storage/docs/json_api/v1/buckets
2656
2657 .. note::
2658
2659 The getter for this property returns a generator which yields
2660 *copies* of the bucket's lifecycle rules mappings. Mutating the
2661 output dicts has no effect unless you then re-assign the dict via
2662 the setter. E.g.:
2663
2664 >>> rules = list(bucket.lifecycle_rules)
2665             >>> rules.append({'action': {'type': 'Delete'}, 'condition': {'age': 365}})
2666             >>> rules[1]['action']['type'] = 'Delete'
2667 >>> del rules[0]
2668 >>> bucket.lifecycle_rules = rules
2669 >>> bucket.update()
2670
2671 :setter: Set lifecycle rules for this bucket.
2672 :getter: Gets the lifecycle rules for this bucket.
2673
2674 :rtype: generator(dict)
2675 :returns: A sequence of mappings describing each lifecycle rule.
2676 """
2677 info = self._properties.get("lifecycle", {})
2678 for rule in info.get("rule", ()):
2679 action_type = rule["action"]["type"]
2680 if action_type == "Delete":
2681 yield LifecycleRuleDelete.from_api_repr(rule)
2682 elif action_type == "SetStorageClass":
2683 yield LifecycleRuleSetStorageClass.from_api_repr(rule)
2684 elif action_type == "AbortIncompleteMultipartUpload":
2685 yield LifecycleRuleAbortIncompleteMultipartUpload.from_api_repr(rule)
2686 else:
2687 warnings.warn(
2688 "Unknown lifecycle rule type received: {}. Please upgrade to the latest version of google-cloud-storage.".format(
2689 rule
2690 ),
2691 UserWarning,
2692 stacklevel=1,
2693 )
2694
2695 @lifecycle_rules.setter
2696 def lifecycle_rules(self, rules):
2697 """Set lifecycle rules configured for this bucket.
2698
2699 See https://cloud.google.com/storage/docs/lifecycle and
2700 https://cloud.google.com/storage/docs/json_api/v1/buckets
2701
2702 :type rules: list of dictionaries
2703 :param rules: A sequence of mappings describing each lifecycle rule.
2704 """
2705 rules = [dict(rule) for rule in rules] # Convert helpers if needed
2706 self._patch_property("lifecycle", {"rule": rules})
2707
2708 def clear_lifecycle_rules(self):
2709 """Clear lifecycle rules configured for this bucket.
2710
2711 See https://cloud.google.com/storage/docs/lifecycle and
2712 https://cloud.google.com/storage/docs/json_api/v1/buckets
2713 """
2714 self.lifecycle_rules = []
2715
2716 def clear_lifecyle_rules(self):
2717         """Deprecated alias for :meth:`clear_lifecycle_rules`."""
2718 return self.clear_lifecycle_rules()
2719
2720 def add_lifecycle_delete_rule(self, **kw):
2721 """Add a "delete" rule to lifecycle rules configured for this bucket.
2722
2723 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle),
2724 which is set on the bucket. For the general format of a lifecycle configuration, see the
2725 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets).
2726 See also a [code sample](https://cloud.google.com/storage/docs/samples/storage-enable-bucket-lifecycle-management#storage_enable_bucket_lifecycle_management-python).
2727
2728 :type kw: dict
2729         :param kw: arguments passed to :class:`LifecycleRuleConditions`.
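
        Illustrative example (a minimal sketch; ``age`` is one of the
        :class:`LifecycleRuleConditions` arguments):

        >>> bucket.add_lifecycle_delete_rule(age=365)
        >>> bucket.patch()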
2730 """
2731 rules = list(self.lifecycle_rules)
2732 rules.append(LifecycleRuleDelete(**kw))
2733 self.lifecycle_rules = rules
2734
2735 def add_lifecycle_set_storage_class_rule(self, storage_class, **kw):
2736 """Add a "set storage class" rule to lifecycle rules.
2737
2738 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle),
2739 which is set on the bucket. For the general format of a lifecycle configuration, see the
2740 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets).
2741
2742 :type storage_class: str, one of :attr:`STORAGE_CLASSES`.
2743 :param storage_class: new storage class to assign to matching items.
2744
2745 :type kw: dict
2746         :param kw: arguments passed to :class:`LifecycleRuleConditions`.
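
        Illustrative example (a minimal sketch, using the NEARLINE storage
        class together with an ``age`` condition):

        >>> bucket.add_lifecycle_set_storage_class_rule("NEARLINE", age=30)
        >>> bucket.patch()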
2747 """
2748 rules = list(self.lifecycle_rules)
2749 rules.append(LifecycleRuleSetStorageClass(storage_class, **kw))
2750 self.lifecycle_rules = rules
2751
2752 def add_lifecycle_abort_incomplete_multipart_upload_rule(self, **kw):
2753         """Add an "abort incomplete multipart upload" rule to lifecycle rules.
2754
2755 .. note::
2756 The "age" lifecycle condition is the only supported condition
2757 for this rule.
2758
2759 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle),
2760 which is set on the bucket. For the general format of a lifecycle configuration, see the
2761 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets).
2762
2763 :type kw: dict
2764         :param kw: arguments passed to :class:`LifecycleRuleConditions`.
2765 """
2766 rules = list(self.lifecycle_rules)
2767 rules.append(LifecycleRuleAbortIncompleteMultipartUpload(**kw))
2768 self.lifecycle_rules = rules
2769
2770 _location = _scalar_property("location")
2771
2772 @property
2773 def location(self):
2774 """Retrieve location configured for this bucket.
2775
2776 See https://cloud.google.com/storage/docs/json_api/v1/buckets and
2777 https://cloud.google.com/storage/docs/locations
2778
2779 Returns ``None`` if the property has not been set before creation,
2780 or if the bucket's resource has not been loaded from the server.

2781         :rtype: str or ``NoneType``
2782 """
2783 return self._location
2784
2785 @location.setter
2786 def location(self, value):
2787 """(Deprecated) Set `Bucket.location`
2788
2789 This can only be set at bucket **creation** time.
2790
2791 See https://cloud.google.com/storage/docs/json_api/v1/buckets and
2792 https://cloud.google.com/storage/docs/bucket-locations
2793
2794 .. warning::
2795
2796 Assignment to 'Bucket.location' is deprecated, as it is only
2797 valid before the bucket is created. Instead, pass the location
2798 to `Bucket.create`.
2799 """
2800 warnings.warn(_LOCATION_SETTER_MESSAGE, DeprecationWarning, stacklevel=2)
2801 self._location = value
2802
2803 @property
2804 def data_locations(self):
2805 """Retrieve the list of regional locations for custom dual-region buckets.
2806
2807 See https://cloud.google.com/storage/docs/json_api/v1/buckets and
2808 https://cloud.google.com/storage/docs/locations
2809
2810 Returns ``None`` if the property has not been set before creation,
2811 if the bucket's resource has not been loaded from the server,
2812 or if the bucket is not a dual-regions bucket.

2813         :rtype: list of str or ``NoneType``
2814 """
2815 custom_placement_config = self._properties.get("customPlacementConfig", {})
2816 return custom_placement_config.get("dataLocations")
2817
2818 @property
2819 def location_type(self):
2820 """Retrieve the location type for the bucket.
2821
2822 See https://cloud.google.com/storage/docs/storage-classes
2823
2824         :getter: Gets the location type for this bucket.
2825
2826 :rtype: str or ``NoneType``
2827 :returns:
2828 If set, one of
2829 :attr:`~google.cloud.storage.constants.MULTI_REGION_LOCATION_TYPE`,
2830 :attr:`~google.cloud.storage.constants.REGION_LOCATION_TYPE`, or
2831 :attr:`~google.cloud.storage.constants.DUAL_REGION_LOCATION_TYPE`,
2832 else ``None``.
2833 """
2834 return self._properties.get("locationType")
2835
2836 def get_logging(self):
2837 """Return info about access logging for this bucket.
2838
2839 See https://cloud.google.com/storage/docs/access-logs#status
2840
2841 :rtype: dict or None
2842         :returns: a dict with keys ``logBucket`` and ``logObjectPrefix``
2843             (if logging is enabled), or ``None`` (if not).
2844 """
2845 info = self._properties.get("logging")
2846 return copy.deepcopy(info)
2847
2848 def enable_logging(self, bucket_name, object_prefix=""):
2849 """Enable access logging for this bucket.
2850
2851 See https://cloud.google.com/storage/docs/access-logs
2852
2853 :type bucket_name: str
2854 :param bucket_name: name of bucket in which to store access logs
2855
2856 :type object_prefix: str
2857 :param object_prefix: prefix for access log filenames
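
        Illustrative example (a minimal sketch; the log bucket name and
        prefix are placeholders):

        >>> bucket.enable_logging("my-log-bucket", object_prefix="access-logs/")
        >>> bucket.patch()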
2858 """
2859 info = {"logBucket": bucket_name, "logObjectPrefix": object_prefix}
2860 self._patch_property("logging", info)
2861
2862 def disable_logging(self):
2863 """Disable access logging for this bucket.
2864
2865 See https://cloud.google.com/storage/docs/access-logs#disabling
2866 """
2867 self._patch_property("logging", None)
2868
2869 @property
2870 def metageneration(self):
2871 """Retrieve the metageneration for the bucket.
2872
2873 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2874
2875 :rtype: int or ``NoneType``
2876 :returns: The metageneration of the bucket or ``None`` if the bucket's
2877 resource has not been loaded from the server.
2878 """
2879 metageneration = self._properties.get("metageneration")
2880 if metageneration is not None:
2881 return int(metageneration)
2882
2883 @property
2884 def owner(self):
2885 """Retrieve info about the owner of the bucket.
2886
2887 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2888
2889 :rtype: dict or ``NoneType``
2890 :returns: Mapping of owner's role/ID. Returns ``None`` if the bucket's
2891 resource has not been loaded from the server.
2892 """
2893 return copy.deepcopy(self._properties.get("owner"))
2894
2895 @property
2896 def project_number(self):
2897 """Retrieve the number of the project to which the bucket is assigned.
2898
2899 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2900
2901 :rtype: int or ``NoneType``
2902 :returns: The project number that owns the bucket or ``None`` if
2903 the bucket's resource has not been loaded from the server.
2904 """
2905 project_number = self._properties.get("projectNumber")
2906 if project_number is not None:
2907 return int(project_number)
2908
2909 @property
2910 def retention_policy_effective_time(self):
2911 """Retrieve the effective time of the bucket's retention policy.
2912
2913 :rtype: datetime.datetime or ``NoneType``
2914         :returns: point-in-time at which the bucket's retention policy is
2915 effective, or ``None`` if the property is not
2916 set locally.
2917 """
2918 policy = self._properties.get("retentionPolicy")
2919 if policy is not None:
2920 timestamp = policy.get("effectiveTime")
2921 if timestamp is not None:
2922 return _rfc3339_nanos_to_datetime(timestamp)
2923
2924 @property
2925 def retention_policy_locked(self):
2926         """Retrieve whether the bucket's retention policy is locked.
2927
2928 :rtype: bool
2929 :returns: True if the bucket's policy is locked, or else False
2930 if the policy is not locked, or the property is not
2931 set locally.
2932 """
2933 policy = self._properties.get("retentionPolicy")
2934 if policy is not None:
2935 return policy.get("isLocked")
2936
2937 @property
2938 def retention_period(self):
2939 """Retrieve or set the retention period for items in the bucket.
2940
2941 :rtype: int or ``NoneType``
2942 :returns: number of seconds to retain items after upload or release
2943 from event-based lock, or ``None`` if the property is not
2944 set locally.
2945 """
2946 policy = self._properties.get("retentionPolicy")
2947 if policy is not None:
2948 period = policy.get("retentionPeriod")
2949 if period is not None:
2950 return int(period)
2951
2952 @retention_period.setter
2953 def retention_period(self, value):
2954 """Set the retention period for items in the bucket.
2955
2956 :type value: int
2957 :param value:
2958 number of seconds to retain items after upload or release from
2959 event-based lock.
2960
2961 :raises ValueError: if the bucket's retention policy is locked.
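
        Illustrative example (a minimal sketch; retains objects for one day):

        >>> bucket.retention_period = 86400  # seconds
        >>> bucket.patch()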
2962 """
2963 policy = self._properties.setdefault("retentionPolicy", {})
2964 if value is not None:
2965 policy["retentionPeriod"] = str(value)
2966 else:
2967 policy = None
2968 self._patch_property("retentionPolicy", policy)
2969
2970 @property
2971 def self_link(self):
2972 """Retrieve the URI for the bucket.
2973
2974 See https://cloud.google.com/storage/docs/json_api/v1/buckets
2975
2976 :rtype: str or ``NoneType``
2977 :returns: The self link for the bucket or ``None`` if
2978 the bucket's resource has not been loaded from the server.
2979 """
2980 return self._properties.get("selfLink")
2981
2982 @property
2983 def storage_class(self):
2984 """Retrieve or set the storage class for the bucket.
2985
2986 See https://cloud.google.com/storage/docs/storage-classes
2987
2988 :setter: Set the storage class for this bucket.
2989         :getter: Gets the storage class for this bucket.
2990
2991 :rtype: str or ``NoneType``
2992 :returns:
2993 If set, one of
2994 :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`,
2995 :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`,
2996 :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`,
2997 :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`,
2998 :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`,
2999 :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`,
3000 or
3001 :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`,
3002 else ``None``.
3003 """
3004 return self._properties.get("storageClass")
3005
3006 @storage_class.setter
3007 def storage_class(self, value):
3008 """Set the storage class for the bucket.
3009
3010 See https://cloud.google.com/storage/docs/storage-classes
3011
3012 :type value: str
3013 :param value:
3014 One of
3015 :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`,
3016 :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`,
3017 :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`,
3018 :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`,
3019 :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`,
3020 :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`,
3021 or
3022 :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`,
3023 """
3024 self._patch_property("storageClass", value)
3025
3026 @property
3027 def time_created(self):
3028 """Retrieve the timestamp at which the bucket was created.
3029
3030 See https://cloud.google.com/storage/docs/json_api/v1/buckets
3031
3032 :rtype: :class:`datetime.datetime` or ``NoneType``
3033 :returns: Datetime object parsed from RFC3339 valid timestamp, or
3034 ``None`` if the bucket's resource has not been loaded
3035 from the server.
3036 """
3037 value = self._properties.get("timeCreated")
3038 if value is not None:
3039 return _rfc3339_nanos_to_datetime(value)
3040
3041 @property
3042 def updated(self):
3043 """Retrieve the timestamp at which the bucket was last updated.
3044
3045 See https://cloud.google.com/storage/docs/json_api/v1/buckets
3046
3047 :rtype: :class:`datetime.datetime` or ``NoneType``
3048 :returns: Datetime object parsed from RFC3339 valid timestamp, or
3049 ``None`` if the bucket's resource has not been loaded
3050 from the server.
3051 """
3052 value = self._properties.get("updated")
3053 if value is not None:
3054 return _rfc3339_nanos_to_datetime(value)
3055
3056 @property
3057 def versioning_enabled(self):
3058 """Is versioning enabled for this bucket?
3059
3060 See https://cloud.google.com/storage/docs/object-versioning for
3061 details.
3062
3063 :setter: Update whether versioning is enabled for this bucket.
3064 :getter: Query whether versioning is enabled for this bucket.
3065
3066 :rtype: bool
3067 :returns: True if enabled, else False.
3068 """
3069 versioning = self._properties.get("versioning", {})
3070 return versioning.get("enabled", False)
3071
3072 @versioning_enabled.setter
3073 def versioning_enabled(self, value):
3074 """Enable versioning for this bucket.
3075
3076 See https://cloud.google.com/storage/docs/object-versioning for
3077 details.
3078
3079 :type value: convertible to boolean
3080 :param value: should versioning be enabled for the bucket?
3081 """
3082 self._patch_property("versioning", {"enabled": bool(value)})
3083
3084 @property
3085 def requester_pays(self):
3086 """Does the requester pay for API requests for this bucket?
3087
3088 See https://cloud.google.com/storage/docs/requester-pays for
3089 details.
3090
3091 :setter: Update whether requester pays for this bucket.
3092 :getter: Query whether requester pays for this bucket.
3093
3094 :rtype: bool
3095 :returns: True if requester pays for API requests for the bucket,
3096 else False.
3097 """
3098         billing = self._properties.get("billing", {})
3099         return billing.get("requesterPays", False)
3100
3101 @requester_pays.setter
3102 def requester_pays(self, value):
3103 """Update whether requester pays for API requests for this bucket.
3104
3105 See https://cloud.google.com/storage/docs/using-requester-pays for
3106 details.
3107
3108 :type value: convertible to boolean
3109 :param value: should requester pay for API requests for the bucket?
3110 """
3111 self._patch_property("billing", {"requesterPays": bool(value)})
3112
3113 @property
3114 def autoclass_enabled(self):
3115 """Whether Autoclass is enabled for this bucket.
3116
3117 See https://cloud.google.com/storage/docs/using-autoclass for details.
3118
3119 :setter: Update whether autoclass is enabled for this bucket.
3120 :getter: Query whether autoclass is enabled for this bucket.
3121
3122 :rtype: bool
3123 :returns: True if enabled, else False.
3124 """
3125 autoclass = self._properties.get("autoclass", {})
3126 return autoclass.get("enabled", False)
3127
3128 @autoclass_enabled.setter
3129 def autoclass_enabled(self, value):
3130 """Enable or disable Autoclass at the bucket-level.
3131
3132 See https://cloud.google.com/storage/docs/using-autoclass for details.
3133
3134 :type value: convertible to boolean
3135 :param value: If true, enable Autoclass for this bucket.
3136 If false, disable Autoclass for this bucket.
3137 """
3138 autoclass = self._properties.get("autoclass", {})
3139 autoclass["enabled"] = bool(value)
3140 self._patch_property("autoclass", autoclass)
3141
3142 @property
3143 def autoclass_toggle_time(self):
3144         """Retrieve the time at which Autoclass was last enabled or disabled for the bucket.

3145         :rtype: datetime.datetime or ``NoneType``
3146         :returns: point-in-time at which the bucket's Autoclass setting was last toggled, or ``None`` if the property is not set locally.
3147 """
3148 autoclass = self._properties.get("autoclass")
3149 if autoclass is not None:
3150 timestamp = autoclass.get("toggleTime")
3151 if timestamp is not None:
3152 return _rfc3339_nanos_to_datetime(timestamp)
3153
3154 @property
3155 def autoclass_terminal_storage_class(self):
3156 """The storage class that objects in an Autoclass bucket eventually transition to if
3157 they are not read for a certain length of time. Valid values are NEARLINE and ARCHIVE.
3158
3159 See https://cloud.google.com/storage/docs/using-autoclass for details.
3160
3161 :setter: Set the terminal storage class for Autoclass configuration.
3162 :getter: Get the terminal storage class for Autoclass configuration.
3163
3164 :rtype: str
3165 :returns: The terminal storage class if Autoclass is enabled, else ``None``.
3166 """
3167 autoclass = self._properties.get("autoclass", {})
3168 return autoclass.get("terminalStorageClass", None)
3169
3170 @autoclass_terminal_storage_class.setter
3171 def autoclass_terminal_storage_class(self, value):
3172 """The storage class that objects in an Autoclass bucket eventually transition to if
3173 they are not read for a certain length of time. Valid values are NEARLINE and ARCHIVE.
3174
3175 See https://cloud.google.com/storage/docs/using-autoclass for details.
3176
3177 :type value: str
        :param value: The only valid values are ``"NEARLINE"`` and ``"ARCHIVE"``.
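
        Example:
            A minimal sketch, assuming an authenticated client and a
            hypothetical bucket name; Autoclass must be enabled for the
            terminal storage class to take effect:

            >>> from google.cloud import storage
            >>> client = storage.Client()
            >>> bucket = client.get_bucket("my-bucket")  # hypothetical name
            >>> bucket.autoclass_enabled = True
            >>> bucket.autoclass_terminal_storage_class = "ARCHIVE"
            >>> bucket.patch()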
3179 """
3180 autoclass = self._properties.get("autoclass", {})
3181 autoclass["terminalStorageClass"] = value
3182 self._patch_property("autoclass", autoclass)
3183
3184 @property
3185 def autoclass_terminal_storage_class_update_time(self):
        """The time at which the Autoclass ``terminal_storage_class`` field was last updated for this bucket.

        :rtype: datetime.datetime or ``NoneType``
        :returns: point-in-time at which the bucket's ``terminal_storage_class``
            was last updated, or ``None`` if the property is not set locally.
3189 """
3190 autoclass = self._properties.get("autoclass")
3191 if autoclass is not None:
3192 timestamp = autoclass.get("terminalStorageClassUpdateTime")
3193 if timestamp is not None:
3194 return _rfc3339_nanos_to_datetime(timestamp)
3195
3196 @property
3197 def object_retention_mode(self):
3198 """Retrieve the object retention mode set on the bucket.
3199
        :rtype: str or ``NoneType``
        :returns: ``"Enabled"`` if retention configurations can be set on
            objects in the bucket; ``None`` if the property is not set.
3203 """
3204 object_retention = self._properties.get("objectRetention")
3205 if object_retention is not None:
3206 return object_retention.get("mode")
3207
3208 @property
3209 def hierarchical_namespace_enabled(self):
3210 """Whether hierarchical namespace is enabled for this bucket.
3211
3212 :setter: Update whether hierarchical namespace is enabled for this bucket.
3213 :getter: Query whether hierarchical namespace is enabled for this bucket.
3214
        :rtype: bool or ``NoneType``
        :returns: True if enabled, False if explicitly disabled, or ``None``
            if the property is not set.
3217 """
3218 hns = self._properties.get("hierarchicalNamespace", {})
3219 return hns.get("enabled")
3220
3221 @hierarchical_namespace_enabled.setter
3222 def hierarchical_namespace_enabled(self, value):
        """Enable or disable hierarchical namespace at the bucket level.
3224
3225 :type value: convertible to boolean
3226 :param value: If true, enable hierarchical namespace for this bucket.
3227 If false, disable hierarchical namespace for this bucket.
3228
3229 .. note::
3230 To enable hierarchical namespace, you must set it at bucket creation time.
3231 Currently, hierarchical namespace configuration cannot be changed after bucket creation.
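
        Example:
            A minimal sketch of creating a bucket with hierarchical
            namespace enabled; the bucket name is hypothetical, and the
            example assumes that hierarchical namespace buckets also
            require uniform bucket-level access:

            >>> from google.cloud import storage
            >>> client = storage.Client()
            >>> bucket = client.bucket("my-new-bucket")  # hypothetical name
            >>> bucket.hierarchical_namespace_enabled = True
            >>> bucket.iam_configuration.uniform_bucket_level_access_enabled = True
            >>> bucket = client.create_bucket(bucket)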
3232 """
3233 hns = self._properties.get("hierarchicalNamespace", {})
3234 hns["enabled"] = bool(value)
3235 self._patch_property("hierarchicalNamespace", hns)
3236
3237 def configure_website(self, main_page_suffix=None, not_found_page=None):
3238 """Configure website-related properties.
3239
3240 See https://cloud.google.com/storage/docs/static-website
3241
3242 .. note::
            This configures the bucket's website-related properties, controlling how
3244 the service behaves when accessing bucket contents as a web site.
3245 See [tutorials](https://cloud.google.com/storage/docs/hosting-static-website) and
3246 [code samples](https://cloud.google.com/storage/docs/samples/storage-define-bucket-website-configuration#storage_define_bucket_website_configuration-python)
3247 for more information.
3248
3249 :type main_page_suffix: str
3250 :param main_page_suffix: The page to use as the main page
3251 of a directory.
3252 Typically something like index.html.
3253
3254 :type not_found_page: str
3255 :param not_found_page: The file to use when a page isn't found.
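
        Example:
            A minimal sketch, assuming an authenticated client and a
            hypothetical bucket name; ``patch()`` sends the change to the
            server:

            >>> from google.cloud import storage
            >>> client = storage.Client()
            >>> bucket = client.get_bucket("my-bucket")  # hypothetical name
            >>> bucket.configure_website("index.html", "404.html")
            >>> bucket.patch()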
3256 """
3257 data = {"mainPageSuffix": main_page_suffix, "notFoundPage": not_found_page}
3258 self._patch_property("website", data)
3259
3260 def disable_website(self):
3261 """Disable the website configuration for this bucket.
3262
3263 This is really just a shortcut for setting the website-related
3264 attributes to ``None``.
3265 """
3266 return self.configure_website(None, None)
3267
3268 def get_iam_policy(
3269 self,
3270 client=None,
3271 requested_policy_version=None,
3272 timeout=_DEFAULT_TIMEOUT,
3273 retry=DEFAULT_RETRY,
3274 ):
3275 """Retrieve the IAM policy for the bucket.
3276
3277 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy)
3278 and a [code sample](https://cloud.google.com/storage/docs/samples/storage-view-bucket-iam-members#storage_view_bucket_iam_members-python).
3279
3280 If :attr:`user_project` is set, bills the API request to that project.
3281
3282 :type client: :class:`~google.cloud.storage.client.Client` or
3283 ``NoneType``
3284 :param client: (Optional) The client to use. If not passed, falls back
3285 to the ``client`` stored on the current bucket.
3286
3287 :type requested_policy_version: int or ``NoneType``
3288 :param requested_policy_version: (Optional) The version of IAM policies to request.
3289 If a policy with a condition is requested without
3290 setting this, the server will return an error.
3291 This must be set to a value of 3 to retrieve IAM
3292 policies containing conditions. This is to prevent
3293 client code that isn't aware of IAM conditions from
3294 interpreting and modifying policies incorrectly.
3295 The service might return a policy with version lower
3296 than the one that was requested, based on the
3297 feature syntax in the policy fetched.
3298
3299 :type timeout: float or tuple
3300 :param timeout:
3301 (Optional) The amount of time, in seconds, to wait
3302 for the server response. See: :ref:`configuring_timeouts`
3303
3304 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3305 :param retry:
3306 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3307
3308 :rtype: :class:`google.api_core.iam.Policy`
3309 :returns: the policy instance, based on the resource returned from
3310 the ``getIamPolicy`` API request.
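
        Example:
            A minimal sketch of listing the policy's bindings, assuming an
            authenticated client and a hypothetical bucket name:

            >>> from google.cloud import storage
            >>> client = storage.Client()
            >>> bucket = client.get_bucket("my-bucket")  # hypothetical name
            >>> policy = bucket.get_iam_policy(requested_policy_version=3)
            >>> for binding in policy.bindings:
            ...     print(binding["role"], sorted(binding["members"]))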
3311 """
3312 with create_trace_span(name="Storage.Bucket.getIamPolicy"):
3313 client = self._require_client(client)
3314 query_params = {}
3315
3316 if self.user_project is not None:
3317 query_params["userProject"] = self.user_project
3318
3319 if requested_policy_version is not None:
3320 query_params["optionsRequestedPolicyVersion"] = requested_policy_version
3321
3322 info = client._get_resource(
3323 f"{self.path}/iam",
3324 query_params=query_params,
3325 timeout=timeout,
3326 retry=retry,
3327 _target_object=None,
3328 )
3329 return Policy.from_api_repr(info)
3330
3331 def set_iam_policy(
3332 self,
3333 policy,
3334 client=None,
3335 timeout=_DEFAULT_TIMEOUT,
3336 retry=DEFAULT_RETRY_IF_ETAG_IN_JSON,
3337 ):
3338 """Update the IAM policy for the bucket.
3339
3340 See
3341 https://cloud.google.com/storage/docs/json_api/v1/buckets/setIamPolicy
3342
3343 If :attr:`user_project` is set, bills the API request to that project.
3344
3345 :type policy: :class:`google.api_core.iam.Policy`
3346 :param policy: policy instance used to update bucket's IAM policy.
3347
3348 :type client: :class:`~google.cloud.storage.client.Client` or
3349 ``NoneType``
3350 :param client: (Optional) The client to use. If not passed, falls back
3351 to the ``client`` stored on the current bucket.
3352
3353 :type timeout: float or tuple
3354 :param timeout:
3355 (Optional) The amount of time, in seconds, to wait
3356 for the server response. See: :ref:`configuring_timeouts`
3357
3358 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3359 :param retry:
3360 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3361
3362 :rtype: :class:`google.api_core.iam.Policy`
3363 :returns: the policy instance, based on the resource returned from
3364 the ``setIamPolicy`` API request.
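
        Example:
            A minimal sketch of granting a role, assuming an authenticated
            client; the bucket name and member are hypothetical:

            >>> from google.cloud import storage
            >>> client = storage.Client()
            >>> bucket = client.get_bucket("my-bucket")  # hypothetical name
            >>> policy = bucket.get_iam_policy(requested_policy_version=3)
            >>> policy.bindings.append({
            ...     "role": "roles/storage.objectViewer",
            ...     "members": {"user:alice@example.com"},  # hypothetical member
            ... })
            >>> policy = bucket.set_iam_policy(policy)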
3365 """
3366 with create_trace_span(name="Storage.Bucket.setIamPolicy"):
3367 client = self._require_client(client)
3368 query_params = {}
3369
3370 if self.user_project is not None:
3371 query_params["userProject"] = self.user_project
3372
3373 path = f"{self.path}/iam"
3374 resource = policy.to_api_repr()
3375 resource["resourceId"] = self.path
3376
3377 info = client._put_resource(
3378 path,
3379 resource,
3380 query_params=query_params,
3381 timeout=timeout,
3382 retry=retry,
3383 _target_object=None,
3384 )
3385
3386 return Policy.from_api_repr(info)
3387
3388 def test_iam_permissions(
3389 self, permissions, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY
3390 ):
3391 """API call: test permissions
3392
3393 See
3394 https://cloud.google.com/storage/docs/json_api/v1/buckets/testIamPermissions
3395
3396 If :attr:`user_project` is set, bills the API request to that project.
3397
3398 :type permissions: list of string
3399 :param permissions: the permissions to check
3400
3401 :type client: :class:`~google.cloud.storage.client.Client` or
3402 ``NoneType``
3403 :param client: (Optional) The client to use. If not passed, falls back
3404 to the ``client`` stored on the current bucket.
3405
3406 :type timeout: float or tuple
3407 :param timeout:
3408 (Optional) The amount of time, in seconds, to wait
3409 for the server response. See: :ref:`configuring_timeouts`
3410
3411 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3412 :param retry:
3413 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3414
3415 :rtype: list of string
3416 :returns: the permissions returned by the ``testIamPermissions`` API
3417 request.
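
        Example:
            A minimal sketch, assuming an authenticated client and a
            hypothetical bucket name:

            >>> from google.cloud import storage
            >>> client = storage.Client()
            >>> bucket = client.get_bucket("my-bucket")  # hypothetical name
            >>> granted = bucket.test_iam_permissions(
            ...     ["storage.objects.get", "storage.objects.create"]
            ... )  # the subset of requested permissions the caller holds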
3418 """
3419 with create_trace_span(name="Storage.Bucket.testIamPermissions"):
3420 client = self._require_client(client)
3421 query_params = {"permissions": permissions}
3422
3423 if self.user_project is not None:
3424 query_params["userProject"] = self.user_project
3425
3426 path = f"{self.path}/iam/testPermissions"
3427 resp = client._get_resource(
3428 path,
3429 query_params=query_params,
3430 timeout=timeout,
3431 retry=retry,
3432 _target_object=None,
3433 )
3434 return resp.get("permissions", [])
3435
3436 def make_public(
3437 self,
3438 recursive=False,
3439 future=False,
3440 client=None,
3441 timeout=_DEFAULT_TIMEOUT,
3442 if_metageneration_match=None,
3443 if_metageneration_not_match=None,
3444 retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
3445 ):
3446 """Update bucket's ACL, granting read access to anonymous users.
3447
3448 :type recursive: bool
3449 :param recursive: If True, this will make all blobs inside the bucket
3450 public as well.
3451
3452 :type future: bool
3453 :param future: If True, this will make all objects created in the
3454 future public as well.
3455
3456 :type client: :class:`~google.cloud.storage.client.Client` or
3457 ``NoneType``
3458 :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
3461 :param timeout:
3462 (Optional) The amount of time, in seconds, to wait
3463 for the server response. See: :ref:`configuring_timeouts`
3464
3465 :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.
3472
3473 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3474 :param retry:
3475 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3476
3477 :raises ValueError:
3478 If ``recursive`` is True, and the bucket contains more than 256
3479 blobs. This is to prevent extremely long runtime of this
3480 method. For such buckets, iterate over the blobs returned by
3481 :meth:`list_blobs` and call
3482 :meth:`~google.cloud.storage.blob.Blob.make_public`
3483 for each blob.
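
        Example:
            A minimal sketch, assuming an authenticated client and a
            hypothetical bucket name; note that ACL-based methods such as
            this one do not work on buckets with uniform bucket-level
            access enabled:

            >>> from google.cloud import storage
            >>> client = storage.Client()
            >>> bucket = client.get_bucket("my-bucket")  # hypothetical name
            >>> bucket.make_public(recursive=True, future=True)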
3484 """
3485 with create_trace_span(name="Storage.Bucket.makePublic"):
3486 self.acl.all().grant_read()
3487 self.acl.save(
3488 client=client,
3489 timeout=timeout,
3490 if_metageneration_match=if_metageneration_match,
3491 if_metageneration_not_match=if_metageneration_not_match,
3492 retry=retry,
3493 )
3494
3495 if future:
3496 doa = self.default_object_acl
3497 if not doa.loaded:
3498 doa.reload(client=client, timeout=timeout)
3499 doa.all().grant_read()
3500 doa.save(
3501 client=client,
3502 timeout=timeout,
3503 if_metageneration_match=if_metageneration_match,
3504 if_metageneration_not_match=if_metageneration_not_match,
3505 retry=retry,
3506 )
3507
3508 if recursive:
3509 blobs = list(
3510 self.list_blobs(
3511 projection="full",
3512 max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
3513 client=client,
3514 timeout=timeout,
3515 )
3516 )
3517 if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
3518 message = (
3519 "Refusing to make public recursively with more than "
3520 "%d objects. If you actually want to make every object "
3521 "in this bucket public, iterate through the blobs "
3522 "returned by 'Bucket.list_blobs()' and call "
3523 "'make_public' on each one."
3524 ) % (self._MAX_OBJECTS_FOR_ITERATION,)
3525 raise ValueError(message)
3526
3527 for blob in blobs:
3528 blob.acl.all().grant_read()
3529 blob.acl.save(
3530 client=client,
3531 timeout=timeout,
3532 )
3533
3534 def make_private(
3535 self,
3536 recursive=False,
3537 future=False,
3538 client=None,
3539 timeout=_DEFAULT_TIMEOUT,
3540 if_metageneration_match=None,
3541 if_metageneration_not_match=None,
3542 retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
3543 ):
3544 """Update bucket's ACL, revoking read access for anonymous users.
3545
3546 :type recursive: bool
3547 :param recursive: If True, this will make all blobs inside the bucket
3548 private as well.
3549
3550 :type future: bool
3551 :param future: If True, this will make all objects created in the
3552 future private as well.
3553
3554 :type client: :class:`~google.cloud.storage.client.Client` or
3555 ``NoneType``
3556 :param client: (Optional) The client to use. If not passed, falls back
3557 to the ``client`` stored on the current bucket.
3558
3559 :type timeout: float or tuple
3560 :param timeout:
3561 (Optional) The amount of time, in seconds, to wait
3562 for the server response. See: :ref:`configuring_timeouts`
3563
3564 :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3571 :param retry:
3572 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3573
3574 :raises ValueError:
3575 If ``recursive`` is True, and the bucket contains more than 256
3576 blobs. This is to prevent extremely long runtime of this
3577 method. For such buckets, iterate over the blobs returned by
3578 :meth:`list_blobs` and call
3579 :meth:`~google.cloud.storage.blob.Blob.make_private`
3580 for each blob.
3581 """
3582 with create_trace_span(name="Storage.Bucket.makePrivate"):
3583 self.acl.all().revoke_read()
3584 self.acl.save(
3585 client=client,
3586 timeout=timeout,
3587 if_metageneration_match=if_metageneration_match,
3588 if_metageneration_not_match=if_metageneration_not_match,
3589 retry=retry,
3590 )
3591
3592 if future:
3593 doa = self.default_object_acl
3594 if not doa.loaded:
3595 doa.reload(client=client, timeout=timeout)
3596 doa.all().revoke_read()
3597 doa.save(
3598 client=client,
3599 timeout=timeout,
3600 if_metageneration_match=if_metageneration_match,
3601 if_metageneration_not_match=if_metageneration_not_match,
3602 retry=retry,
3603 )
3604
3605 if recursive:
3606 blobs = list(
3607 self.list_blobs(
3608 projection="full",
3609 max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
3610 client=client,
3611 timeout=timeout,
3612 )
3613 )
3614 if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
3615 message = (
3616 "Refusing to make private recursively with more than "
3617 "%d objects. If you actually want to make every object "
3618 "in this bucket private, iterate through the blobs "
3619 "returned by 'Bucket.list_blobs()' and call "
3620 "'make_private' on each one."
3621 ) % (self._MAX_OBJECTS_FOR_ITERATION,)
3622 raise ValueError(message)
3623
3624 for blob in blobs:
3625 blob.acl.all().revoke_read()
3626 blob.acl.save(client=client, timeout=timeout)
3627
3628 def generate_upload_policy(self, conditions, expiration=None, client=None):
3629 """Create a signed upload policy for uploading objects.
3630
3631 This method generates and signs a policy document. You can use
3632 [`policy documents`](https://cloud.google.com/storage/docs/xml-api/post-object-forms)
3633 to allow visitors to a website to upload files to
3634 Google Cloud Storage without giving them direct write access.
3635 See a [code sample](https://cloud.google.com/storage/docs/xml-api/post-object-forms#python).
3636
        :type conditions: list
        :param conditions: A list of conditions as described in the
                           `policy documents` documentation.

        :type expiration: datetime
        :param expiration: (Optional) Expiration in UTC. If not specified, the
                           policy will expire in 1 hour.
3644
3645 :type client: :class:`~google.cloud.storage.client.Client`
3646 :param client: (Optional) The client to use. If not passed, falls back
3647 to the ``client`` stored on the current bucket.
3648
3649 :rtype: dict
3650 :returns: A dictionary of (form field name, form field value) of form
3651 fields that should be added to your HTML upload form in order
3652 to attach the signature.
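
        Example:
            A minimal sketch, assuming signing-capable credentials (for
            example, a service-account JSON key) and a hypothetical bucket
            name; the conditions shown are illustrative:

            >>> from google.cloud import storage
            >>> client = storage.Client()
            >>> bucket = client.get_bucket("my-bucket")  # hypothetical name
            >>> conditions = [
            ...     ["starts-with", "$key", "uploads/"],  # restrict object names
            ...     ["content-length-range", 0, 1000000],  # cap upload size
            ... ]
            >>> fields = bucket.generate_upload_policy(conditions)
            >>> sorted(fields)
            ['GoogleAccessId', 'bucket', 'policy', 'signature']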
3653 """
3654 client = self._require_client(client)
3655 credentials = client._credentials
3656 _signing.ensure_signed_credentials(credentials)
3657
3658 if expiration is None:
3659 expiration = _NOW(_UTC).replace(tzinfo=None) + datetime.timedelta(hours=1)
3660
3661 conditions = conditions + [{"bucket": self.name}]
3662
3663 policy_document = {
3664 "expiration": _datetime_to_rfc3339(expiration),
3665 "conditions": conditions,
3666 }
3667
3668 encoded_policy_document = base64.b64encode(
3669 json.dumps(policy_document).encode("utf-8")
3670 )
3671 signature = base64.b64encode(credentials.sign_bytes(encoded_policy_document))
3672
3673 fields = {
3674 "bucket": self.name,
3675 "GoogleAccessId": credentials.signer_email,
3676 "policy": encoded_policy_document.decode("utf-8"),
3677 "signature": signature.decode("utf-8"),
3678 }
3679
3680 return fields
3681
3682 def lock_retention_policy(
3683 self, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY
3684 ):
3685 """Lock the bucket's retention policy.
3686
3687 :type client: :class:`~google.cloud.storage.client.Client` or
3688 ``NoneType``
3689 :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.
3691
3692 :type timeout: float or tuple
3693 :param timeout:
3694 (Optional) The amount of time, in seconds, to wait
3695 for the server response. See: :ref:`configuring_timeouts`
3696
3697 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3698 :param retry:
3699 (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3700
3701 :raises ValueError:
3702 if the bucket has no metageneration (i.e., new or never reloaded);
3703 if the bucket has no retention policy assigned;
3704 if the bucket's retention policy is already locked.
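
        Example:
            A minimal sketch, assuming an authenticated client and a
            hypothetical bucket name; note that locking a retention policy
            is irreversible:

            >>> from google.cloud import storage
            >>> client = storage.Client()
            >>> bucket = client.get_bucket("my-bucket")  # hypothetical name
            >>> bucket.retention_period = 86400  # one day, in seconds
            >>> bucket.patch()
            >>> bucket.reload()  # ensure metageneration is current
            >>> bucket.lock_retention_policy()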
3705 """
3706 with create_trace_span(name="Storage.Bucket.lockRetentionPolicy"):
3707 if "metageneration" not in self._properties:
                raise ValueError(
                    "Bucket has no metageneration set: try 'reload'?"
                )
3711
3712 policy = self._properties.get("retentionPolicy")
3713
3714 if policy is None:
3715 raise ValueError(
3716 "Bucket has no retention policy assigned: try 'reload'?"
3717 )
3718
3719 if policy.get("isLocked"):
3720 raise ValueError("Bucket's retention policy is already locked.")
3721
3722 client = self._require_client(client)
3723
3724 query_params = {"ifMetagenerationMatch": self.metageneration}
3725
3726 if self.user_project is not None:
3727 query_params["userProject"] = self.user_project
3728
3729 path = f"/b/{self.name}/lockRetentionPolicy"
3730 api_response = client._post_resource(
3731 path,
3732 None,
3733 query_params=query_params,
3734 timeout=timeout,
3735 retry=retry,
3736 _target_object=self,
3737 )
3738 self._set_properties(api_response)
3739
3740 def generate_signed_url(
3741 self,
3742 expiration=None,
3743 api_access_endpoint=None,
3744 method="GET",
3745 headers=None,
3746 query_parameters=None,
3747 client=None,
3748 credentials=None,
3749 version=None,
3750 virtual_hosted_style=False,
3751 bucket_bound_hostname=None,
3752 scheme="http",
3753 ):
3754 """Generates a signed URL for this bucket.
3755
3756 .. note::
3757
            If you are on Google Compute Engine, you can't generate a signed
            URL using a GCE service account. If you'd like to be able to
            generate a signed URL from GCE, you can use a standard service
            account from a JSON file rather than a GCE service account.
3762
3763 If you have a bucket that you want to allow access to for a set
3764 amount of time, you can use this method to generate a URL that
3765 is only valid within a certain time period.
3766
        If ``bucket_bound_hostname`` is used in place of ``api_access_endpoint``,
        ``https`` works only when the hostname is served through a CDN.
3769
3770 :type expiration: Union[Integer, datetime.datetime, datetime.timedelta]
3771 :param expiration: Point in time when the signed URL should expire. If
3772 a ``datetime`` instance is passed without an explicit
3773 ``tzinfo`` set, it will be assumed to be ``UTC``.
3774
3775 :type api_access_endpoint: str
3776 :param api_access_endpoint: (Optional) URI base, for instance
3777 "https://storage.googleapis.com". If not specified, the client's
3778 api_endpoint will be used. Incompatible with bucket_bound_hostname.
3779
3780 :type method: str
3781 :param method: The HTTP verb that will be used when requesting the URL.
3782
3783 :type headers: dict
3784 :param headers:
3785 (Optional) Additional HTTP headers to be included as part of the
3786 signed URLs. See:
3787 https://cloud.google.com/storage/docs/xml-api/reference-headers
3788 Requests using the signed URL *must* pass the specified header
3789 (name and value) with each request for the URL.
3790
3791 :type query_parameters: dict
3792 :param query_parameters:
3793 (Optional) Additional query parameters to be included as part of the
3794 signed URLs. See:
3795 https://cloud.google.com/storage/docs/xml-api/reference-headers#query
3796
3797 :type client: :class:`~google.cloud.storage.client.Client` or
3798 ``NoneType``
3799 :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.
3801
3802 :type credentials: :class:`google.auth.credentials.Credentials` or
3803 :class:`NoneType`
3804 :param credentials: The authorization credentials to attach to requests.
3805 These credentials identify this application to the service.
3806 If none are specified, the client will attempt to ascertain
3807 the credentials from the environment.
3808
3809 :type version: str
3810 :param version: (Optional) The version of signed credential to create.
3811 Must be one of 'v2' | 'v4'.
3812
3813 :type virtual_hosted_style: bool
3814 :param virtual_hosted_style:
            (Optional) If true, then construct the URL relative to the bucket's
            virtual hostname, e.g., '<bucket-name>.storage.googleapis.com'.
3817 Incompatible with bucket_bound_hostname.
3818
3819 :type bucket_bound_hostname: str
3820 :param bucket_bound_hostname:
3821 (Optional) If passed, then construct the URL relative to the bucket-bound hostname.
            Value can be a bare hostname or include a scheme, e.g., 'example.com' or 'http://example.com'.
3823 Incompatible with api_access_endpoint and virtual_hosted_style.
3824 See: https://cloud.google.com/storage/docs/request-endpoints#cname
3825
3826 :type scheme: str
3827 :param scheme:
3828 (Optional) If ``bucket_bound_hostname`` is passed as a bare hostname, use
3829 this value as the scheme. ``https`` will work only when using a CDN.
3830 Defaults to ``"http"``.
3831
3832 :raises: :exc:`ValueError` when version is invalid or mutually exclusive arguments are used.
3833 :raises: :exc:`TypeError` when expiration is not a valid type.
3834 :raises: :exc:`AttributeError` if credentials is not an instance
3835 of :class:`google.auth.credentials.Signing`.
3836
3837 :rtype: str
3838 :returns: A signed URL you can use to access the resource
3839 until expiration.
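
        Example:
            A minimal sketch of a v4 URL for listing the bucket's contents,
            assuming signing-capable credentials and a hypothetical bucket
            name:

            >>> import datetime
            >>> from google.cloud import storage
            >>> client = storage.Client()
            >>> bucket = client.get_bucket("my-bucket")  # hypothetical name
            >>> url = bucket.generate_signed_url(
            ...     expiration=datetime.timedelta(hours=1),
            ...     method="GET",
            ...     version="v4",
            ... )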
3840 """
3841 if version is None:
3842 version = "v2"
3843 elif version not in ("v2", "v4"):
3844 raise ValueError("'version' must be either 'v2' or 'v4'")
3845
3846 if (
3847 api_access_endpoint is not None or virtual_hosted_style
3848 ) and bucket_bound_hostname:
3849 raise ValueError(
3850 "The bucket_bound_hostname argument is not compatible with "
3851 "either api_access_endpoint or virtual_hosted_style."
3852 )
3853
3854 if api_access_endpoint is None:
3855 client = self._require_client(client)
3856 api_access_endpoint = client.api_endpoint
3857
3858 # If you are on Google Compute Engine, you can't generate a signed URL
3859 # using GCE service account.
3860 # See https://github.com/googleapis/google-auth-library-python/issues/50
3861 if virtual_hosted_style:
3862 api_access_endpoint = _virtual_hosted_style_base_url(
3863 api_access_endpoint, self.name
3864 )
3865 resource = "/"
3866 elif bucket_bound_hostname:
3867 api_access_endpoint = _bucket_bound_hostname_url(
3868 bucket_bound_hostname, scheme
3869 )
3870 resource = "/"
3871 else:
3872 resource = f"/{self.name}"
3873
3874 if credentials is None:
3875 client = self._require_client(client) # May be redundant, but that's ok.
3876 credentials = client._credentials
3877
3878 if version == "v2":
3879 helper = generate_signed_url_v2
3880 else:
3881 helper = generate_signed_url_v4
3882
3883 return helper(
3884 credentials,
3885 resource=resource,
3886 expiration=expiration,
3887 api_access_endpoint=api_access_endpoint,
3888 method=method.upper(),
3889 headers=headers,
3890 query_parameters=query_parameters,
3891 )
3892
3893
3894class SoftDeletePolicy(dict):
3895 """Map a bucket's soft delete policy.
3896
3897 See https://cloud.google.com/storage/docs/soft-delete
3898
3899 :type bucket: :class:`Bucket`
3900 :param bucket: Bucket for which this instance is the policy.
3901
3902 :type retention_duration_seconds: int
3903 :param retention_duration_seconds:
3904 (Optional) The period of time in seconds that soft-deleted objects in the bucket
3905 will be retained and cannot be permanently deleted.
3906
3907 :type effective_time: :class:`datetime.datetime`
3908 :param effective_time:
3909 (Optional) When the bucket's soft delete policy is effective.
3910 This value should normally only be set by the back-end API.
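
    Example:
        A minimal sketch of updating a bucket's soft delete retention,
        assuming an authenticated client and a hypothetical bucket name:

        >>> from google.cloud import storage
        >>> client = storage.Client()
        >>> bucket = client.get_bucket("my-bucket")  # hypothetical name
        >>> bucket.soft_delete_policy.retention_duration_seconds = 7 * 86400
        >>> bucket.patch()  # persist the updated policy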
3911 """
3912
3913 def __init__(self, bucket, **kw):
        data = {}
        retention_duration_seconds = kw.get("retention_duration_seconds")
        if retention_duration_seconds is not None:
            data["retentionDurationSeconds"] = retention_duration_seconds

        effective_time = kw.get("effective_time")
        if effective_time is not None:
            # Serialize to RFC3339; omit unset fields so they are not
            # sent to the API as JSON nulls.
            data["effectiveTime"] = _datetime_to_rfc3339(effective_time)
3922
3923 super().__init__(data)
3924 self._bucket = bucket
3925
3926 @classmethod
3927 def from_api_repr(cls, resource, bucket):
3928 """Factory: construct instance from resource.
3929
3930 :type resource: dict
3931 :param resource: mapping as returned from API call.
3932
3933 :type bucket: :class:`Bucket`
        :param bucket: Bucket for which this instance is the policy.
3935
3936 :rtype: :class:`SoftDeletePolicy`
3937 :returns: Instance created from resource.
3938 """
3939 instance = cls(bucket)
3940 instance.update(resource)
3941 return instance
3942
3943 @property
3944 def bucket(self):
3945 """Bucket for which this instance is the policy.
3946
3947 :rtype: :class:`Bucket`
3948 :returns: the instance's bucket.
3949 """
3950 return self._bucket
3951
3952 @property
3953 def retention_duration_seconds(self):
3954 """Get the retention duration of the bucket's soft delete policy.
3955
3956 :rtype: int or ``NoneType``
3957 :returns: The period of time in seconds that soft-deleted objects in the bucket
3958 will be retained and cannot be permanently deleted; Or ``None`` if the
3959 property is not set.
3960 """
3961 duration = self.get("retentionDurationSeconds")
3962 if duration is not None:
3963 return int(duration)
3964
3965 @retention_duration_seconds.setter
3966 def retention_duration_seconds(self, value):
3967 """Set the retention duration of the bucket's soft delete policy.
3968
3969 :type value: int
3970 :param value:
3971 The period of time in seconds that soft-deleted objects in the bucket
3972 will be retained and cannot be permanently deleted.
3973 """
3974 self["retentionDurationSeconds"] = value
3975 self.bucket._patch_property("softDeletePolicy", self)
3976
3977 @property
3978 def effective_time(self):
3979 """Get the effective time of the bucket's soft delete policy.
3980
3981 :rtype: datetime.datetime or ``NoneType``
        :returns: point-in-time at which the bucket's soft delete policy is
3983 effective, or ``None`` if the property is not set.
3984 """
3985 timestamp = self.get("effectiveTime")
3986 if timestamp is not None:
3987 return _rfc3339_nanos_to_datetime(timestamp)
3988
3989
3990def _raise_if_len_differs(expected_len, **generation_match_args):
3991 """
    Raise an error if any generation match argument
    is set and its length differs from the given value.
3994
3995 :type expected_len: int
3996 :param expected_len: Expected argument length in case it's set.
3997
3998 :type generation_match_args: dict
    :param generation_match_args: Lists whose lengths must be checked.
4000
4001 :raises: :exc:`ValueError` if any argument set, but has an unexpected length.
4002 """
4003 for name, value in generation_match_args.items():
4004 if value is not None and len(value) != expected_len:
4005 raise ValueError(f"'{name}' length must be the same as 'blobs' length")