Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/google/cloud/storage/bucket.py: 35%

Shortcuts on this page

r m x   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

943 statements  

1# Copyright 2014 Google LLC 

2# 

3# Licensed under the Apache License, Version 2.0 (the "License"); 

4# you may not use this file except in compliance with the License. 

5# You may obtain a copy of the License at 

6# 

7# http://www.apache.org/licenses/LICENSE-2.0 

8# 

9# Unless required by applicable law or agreed to in writing, software 

10# distributed under the License is distributed on an "AS IS" BASIS, 

11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 

12# See the License for the specific language governing permissions and 

13# limitations under the License. 

14 

15"""Create / interact with Google Cloud Storage buckets.""" 

16 

17import base64 

18import copy 

19import datetime 

20import json 

21from urllib.parse import urlsplit 

22import warnings 

23 

24from google.api_core import datetime_helpers 

25from google.cloud._helpers import _datetime_to_rfc3339 

26from google.cloud._helpers import _rfc3339_nanos_to_datetime 

27from google.cloud.exceptions import NotFound 

28from google.api_core.iam import Policy 

29from google.cloud.storage import _signing 

30from google.cloud.storage._helpers import _add_etag_match_headers 

31from google.cloud.storage._helpers import _add_generation_match_parameters 

32from google.cloud.storage._helpers import _NOW 

33from google.cloud.storage._helpers import _PropertyMixin 

34from google.cloud.storage._helpers import _UTC 

35from google.cloud.storage._helpers import _scalar_property 

36from google.cloud.storage._helpers import _validate_name 

37from google.cloud.storage._signing import generate_signed_url_v2 

38from google.cloud.storage._signing import generate_signed_url_v4 

39from google.cloud.storage._helpers import _bucket_bound_hostname_url 

40from google.cloud.storage._helpers import _virtual_hosted_style_base_url 

41from google.cloud.storage._opentelemetry_tracing import create_trace_span 

42from google.cloud.storage.acl import BucketACL 

43from google.cloud.storage.acl import DefaultObjectACL 

44from google.cloud.storage.blob import _quote 

45from google.cloud.storage.blob import Blob 

46from google.cloud.storage.constants import ( 

47 _DEFAULT_TIMEOUT, 

48 ENFORCEMENT_MODE_FULLY_RESTRICTED, 

49 ENFORCEMENT_MODE_NOT_RESTRICTED, 

50) 

51from google.cloud.storage.constants import ARCHIVE_STORAGE_CLASS 

52from google.cloud.storage.constants import COLDLINE_STORAGE_CLASS 

53from google.cloud.storage.constants import DUAL_REGION_LOCATION_TYPE 

54from google.cloud.storage.constants import ( 

55 DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS, 

56) 

57from google.cloud.storage.constants import MULTI_REGIONAL_LEGACY_STORAGE_CLASS 

58from google.cloud.storage.constants import MULTI_REGION_LOCATION_TYPE 

59from google.cloud.storage.constants import NEARLINE_STORAGE_CLASS 

60from google.cloud.storage.constants import PUBLIC_ACCESS_PREVENTION_INHERITED 

61from google.cloud.storage.constants import REGIONAL_LEGACY_STORAGE_CLASS 

62from google.cloud.storage.constants import REGION_LOCATION_TYPE 

63from google.cloud.storage.constants import STANDARD_STORAGE_CLASS 

64from google.cloud.storage.ip_filter import IPFilter 

65from google.cloud.storage.notification import BucketNotification 

66from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT 

67from google.cloud.storage.retry import DEFAULT_RETRY 

68from google.cloud.storage.retry import DEFAULT_RETRY_IF_GENERATION_SPECIFIED 

69from google.cloud.storage.retry import DEFAULT_RETRY_IF_ETAG_IN_JSON 

70from google.cloud.storage.retry import DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED 

71 

# Deprecation / validation messages shared by IAMConfiguration and Bucket.
_UBLA_BPO_ENABLED_MESSAGE = (
    "Pass only one of 'uniform_bucket_level_access_enabled' / "
    "'bucket_policy_only_enabled' to 'IAMConfiguration'."
)
_BPO_ENABLED_MESSAGE = (
    "'IAMConfiguration.bucket_policy_only_enabled' is deprecated. "
    "Instead, use 'IAMConfiguration.uniform_bucket_level_access_enabled'."
)
_UBLA_BPO_LOCK_TIME_MESSAGE = (
    "Pass only one of 'uniform_bucket_level_access_lock_time' / "
    "'bucket_policy_only_lock_time' to 'IAMConfiguration'."
)
_BPO_LOCK_TIME_MESSAGE = (
    "'IAMConfiguration.bucket_policy_only_lock_time' is deprecated. "
    "Instead, use 'IAMConfiguration.uniform_bucket_level_access_lock_time'."
)
_LOCATION_SETTER_MESSAGE = (
    "Assignment to 'Bucket.location' is deprecated, as it is only "
    "valid before the bucket is created. Instead, pass the location "
    "to `Bucket.create`."
)
_FROM_STRING_MESSAGE = (
    "Bucket.from_string() is deprecated. " "Use Bucket.from_uri() instead."
)
# JSON resource key under which a bucket's IP filter configuration lives.
_IP_FILTER_PROPERTY = "ipFilter"

97 

98 

99def _blobs_page_start(iterator, page, response): 

100 """Grab prefixes after a :class:`~google.cloud.iterator.Page` started. 

101 

102 :type iterator: :class:`~google.api_core.page_iterator.Iterator` 

103 :param iterator: The iterator that is currently in use. 

104 

105 :type page: :class:`~google.cloud.api.core.page_iterator.Page` 

106 :param page: The page that was just created. 

107 

108 :type response: dict 

109 :param response: The JSON API response for a page of blobs. 

110 """ 

111 page.prefixes = tuple(response.get("prefixes", ())) 

112 iterator.prefixes.update(page.prefixes) 

113 

114 

115def _item_to_blob(iterator, item): 

116 """Convert a JSON blob to the native object. 

117 

118 .. note:: 

119 

120 This assumes that the ``bucket`` attribute has been 

121 added to the iterator after being created. 

122 

123 :type iterator: :class:`~google.api_core.page_iterator.Iterator` 

124 :param iterator: The iterator that has retrieved the item. 

125 

126 :type item: dict 

127 :param item: An item to be converted to a blob. 

128 

129 :rtype: :class:`.Blob` 

130 :returns: The next blob in the page. 

131 """ 

132 name = item.get("name") 

133 blob = Blob(name, bucket=iterator.bucket) 

134 blob._set_properties(item) 

135 return blob 

136 

137 

138def _item_to_notification(iterator, item): 

139 """Convert a JSON blob to the native object. 

140 

141 .. note:: 

142 

143 This assumes that the ``bucket`` attribute has been 

144 added to the iterator after being created. 

145 

146 :type iterator: :class:`~google.api_core.page_iterator.Iterator` 

147 :param iterator: The iterator that has retrieved the item. 

148 

149 :type item: dict 

150 :param item: An item to be converted to a blob. 

151 

152 :rtype: :class:`.BucketNotification` 

153 :returns: The next notification being iterated. 

154 """ 

155 return BucketNotification.from_api_repr(item, bucket=iterator.bucket) 

156 

157 

158class LifecycleRuleConditions(dict): 

159 """Map a single lifecycle rule for a bucket. 

160 

161 See: https://cloud.google.com/storage/docs/lifecycle 

162 

163 :type age: int 

164 :param age: (Optional) Apply rule action to items whose age, in days, 

165 exceeds this value. 

166 

167 :type created_before: datetime.date 

168 :param created_before: (Optional) Apply rule action to items created 

169 before this date. 

170 

171 :type is_live: bool 

172 :param is_live: (Optional) If true, apply rule action to non-versioned 

173 items, or to items with no newer versions. If false, apply 

174 rule action to versioned items with at least one newer 

175 version. 

176 

177 :type matches_prefix: list(str) 

178 :param matches_prefix: (Optional) Apply rule action to items which 

179 any prefix matches the beginning of the item name. 

180 

181 :type matches_storage_class: list(str), one or more of 

182 :attr:`Bucket.STORAGE_CLASSES`. 

183 :param matches_storage_class: (Optional) Apply rule action to items 

184 whose storage class matches this value. 

185 

186 :type matches_suffix: list(str) 

187 :param matches_suffix: (Optional) Apply rule action to items which 

188 any suffix matches the end of the item name. 

189 

190 :type number_of_newer_versions: int 

191 :param number_of_newer_versions: (Optional) Apply rule action to versioned 

192 items having N newer versions. 

193 

194 :type days_since_custom_time: int 

195 :param days_since_custom_time: (Optional) Apply rule action to items whose number of days 

196 elapsed since the custom timestamp. This condition is relevant 

197 only for versioned objects. The value of the field must be a non 

198 negative integer. If it's zero, the object version will become 

199 eligible for lifecycle action as soon as it becomes custom. 

200 

201 :type custom_time_before: :class:`datetime.date` 

202 :param custom_time_before: (Optional) Date object parsed from RFC3339 valid date, apply rule action 

203 to items whose custom time is before this date. This condition is relevant 

204 only for versioned objects, e.g., 2019-03-16. 

205 

206 :type days_since_noncurrent_time: int 

207 :param days_since_noncurrent_time: (Optional) Apply rule action to items whose number of days 

208 elapsed since the non current timestamp. This condition 

209 is relevant only for versioned objects. The value of the field 

210 must be a non negative integer. If it's zero, the object version 

211 will become eligible for lifecycle action as soon as it becomes 

212 non current. 

213 

214 :type noncurrent_time_before: :class:`datetime.date` 

215 :param noncurrent_time_before: (Optional) Date object parsed from RFC3339 valid date, apply 

216 rule action to items whose non current time is before this date. 

217 This condition is relevant only for versioned objects, e.g, 2019-03-16. 

218 

219 :raises ValueError: if no arguments are passed. 

220 """ 

221 

222 def __init__( 

223 self, 

224 age=None, 

225 created_before=None, 

226 is_live=None, 

227 matches_storage_class=None, 

228 number_of_newer_versions=None, 

229 days_since_custom_time=None, 

230 custom_time_before=None, 

231 days_since_noncurrent_time=None, 

232 noncurrent_time_before=None, 

233 matches_prefix=None, 

234 matches_suffix=None, 

235 _factory=False, 

236 ): 

237 conditions = {} 

238 

239 if age is not None: 

240 conditions["age"] = age 

241 

242 if created_before is not None: 

243 conditions["createdBefore"] = created_before.isoformat() 

244 

245 if is_live is not None: 

246 conditions["isLive"] = is_live 

247 

248 if matches_storage_class is not None: 

249 conditions["matchesStorageClass"] = matches_storage_class 

250 

251 if number_of_newer_versions is not None: 

252 conditions["numNewerVersions"] = number_of_newer_versions 

253 

254 if days_since_custom_time is not None: 

255 conditions["daysSinceCustomTime"] = days_since_custom_time 

256 

257 if custom_time_before is not None: 

258 conditions["customTimeBefore"] = custom_time_before.isoformat() 

259 

260 if days_since_noncurrent_time is not None: 

261 conditions["daysSinceNoncurrentTime"] = days_since_noncurrent_time 

262 

263 if noncurrent_time_before is not None: 

264 conditions["noncurrentTimeBefore"] = noncurrent_time_before.isoformat() 

265 

266 if matches_prefix is not None: 

267 conditions["matchesPrefix"] = matches_prefix 

268 

269 if matches_suffix is not None: 

270 conditions["matchesSuffix"] = matches_suffix 

271 

272 if not _factory and not conditions: 

273 raise ValueError("Supply at least one condition") 

274 

275 super(LifecycleRuleConditions, self).__init__(conditions) 

276 

277 @classmethod 

278 def from_api_repr(cls, resource): 

279 """Factory: construct instance from resource. 

280 

281 :type resource: dict 

282 :param resource: mapping as returned from API call. 

283 

284 :rtype: :class:`LifecycleRuleConditions` 

285 :returns: Instance created from resource. 

286 """ 

287 instance = cls(_factory=True) 

288 instance.update(resource) 

289 return instance 

290 

291 @property 

292 def age(self): 

293 """Conditon's age value.""" 

294 return self.get("age") 

295 

296 @property 

297 def created_before(self): 

298 """Conditon's created_before value.""" 

299 before = self.get("createdBefore") 

300 if before is not None: 

301 return datetime_helpers.from_iso8601_date(before) 

302 

303 @property 

304 def is_live(self): 

305 """Conditon's 'is_live' value.""" 

306 return self.get("isLive") 

307 

308 @property 

309 def matches_prefix(self): 

310 """Conditon's 'matches_prefix' value.""" 

311 return self.get("matchesPrefix") 

312 

313 @property 

314 def matches_storage_class(self): 

315 """Conditon's 'matches_storage_class' value.""" 

316 return self.get("matchesStorageClass") 

317 

318 @property 

319 def matches_suffix(self): 

320 """Conditon's 'matches_suffix' value.""" 

321 return self.get("matchesSuffix") 

322 

323 @property 

324 def number_of_newer_versions(self): 

325 """Conditon's 'number_of_newer_versions' value.""" 

326 return self.get("numNewerVersions") 

327 

328 @property 

329 def days_since_custom_time(self): 

330 """Conditon's 'days_since_custom_time' value.""" 

331 return self.get("daysSinceCustomTime") 

332 

333 @property 

334 def custom_time_before(self): 

335 """Conditon's 'custom_time_before' value.""" 

336 before = self.get("customTimeBefore") 

337 if before is not None: 

338 return datetime_helpers.from_iso8601_date(before) 

339 

340 @property 

341 def days_since_noncurrent_time(self): 

342 """Conditon's 'days_since_noncurrent_time' value.""" 

343 return self.get("daysSinceNoncurrentTime") 

344 

345 @property 

346 def noncurrent_time_before(self): 

347 """Conditon's 'noncurrent_time_before' value.""" 

348 before = self.get("noncurrentTimeBefore") 

349 if before is not None: 

350 return datetime_helpers.from_iso8601_date(before) 

351 

352 

353class LifecycleRuleDelete(dict): 

354 """Map a lifecycle rule deleting matching items. 

355 

356 :type kw: dict 

357 :params kw: arguments passed to :class:`LifecycleRuleConditions`. 

358 """ 

359 

360 def __init__(self, **kw): 

361 conditions = LifecycleRuleConditions(**kw) 

362 rule = {"action": {"type": "Delete"}, "condition": dict(conditions)} 

363 super().__init__(rule) 

364 

365 @classmethod 

366 def from_api_repr(cls, resource): 

367 """Factory: construct instance from resource. 

368 

369 :type resource: dict 

370 :param resource: mapping as returned from API call. 

371 

372 :rtype: :class:`LifecycleRuleDelete` 

373 :returns: Instance created from resource. 

374 """ 

375 instance = cls(_factory=True) 

376 instance.update(resource) 

377 return instance 

378 

379 

380class LifecycleRuleSetStorageClass(dict): 

381 """Map a lifecycle rule updating storage class of matching items. 

382 

383 :type storage_class: str, one of :attr:`Bucket.STORAGE_CLASSES`. 

384 :param storage_class: new storage class to assign to matching items. 

385 

386 :type kw: dict 

387 :params kw: arguments passed to :class:`LifecycleRuleConditions`. 

388 """ 

389 

390 def __init__(self, storage_class, **kw): 

391 conditions = LifecycleRuleConditions(**kw) 

392 rule = { 

393 "action": { 

394 "type": "SetStorageClass", 

395 "storageClass": storage_class, 

396 }, 

397 "condition": dict(conditions), 

398 } 

399 super().__init__(rule) 

400 

401 @classmethod 

402 def from_api_repr(cls, resource): 

403 """Factory: construct instance from resource. 

404 

405 :type resource: dict 

406 :param resource: mapping as returned from API call. 

407 

408 :rtype: :class:`LifecycleRuleSetStorageClass` 

409 :returns: Instance created from resource. 

410 """ 

411 action = resource["action"] 

412 instance = cls(action["storageClass"], _factory=True) 

413 instance.update(resource) 

414 return instance 

415 

416 

417class LifecycleRuleAbortIncompleteMultipartUpload(dict): 

418 """Map a rule aborting incomplete multipart uploads of matching items. 

419 

420 The "age" lifecycle condition is the only supported condition for this rule. 

421 

422 :type kw: dict 

423 :params kw: arguments passed to :class:`LifecycleRuleConditions`. 

424 """ 

425 

426 def __init__(self, **kw): 

427 conditions = LifecycleRuleConditions(**kw) 

428 rule = { 

429 "action": {"type": "AbortIncompleteMultipartUpload"}, 

430 "condition": dict(conditions), 

431 } 

432 super().__init__(rule) 

433 

434 @classmethod 

435 def from_api_repr(cls, resource): 

436 """Factory: construct instance from resource. 

437 

438 :type resource: dict 

439 :param resource: mapping as returned from API call. 

440 

441 :rtype: :class:`LifecycleRuleAbortIncompleteMultipartUpload` 

442 :returns: Instance created from resource. 

443 """ 

444 instance = cls(_factory=True) 

445 instance.update(resource) 

446 return instance 

447 

448 

449_default = object() 

450 

451 

452class IAMConfiguration(dict): 

453 """Map a bucket's IAM configuration. 

454 

455 :type bucket: :class:`Bucket` 

456 :params bucket: Bucket for which this instance is the policy. 

457 

458 :type public_access_prevention: str 

459 :params public_access_prevention: 

460 (Optional) Whether the public access prevention policy is 'inherited' (default) or 'enforced' 

461 See: https://cloud.google.com/storage/docs/public-access-prevention 

462 

463 :type uniform_bucket_level_access_enabled: bool 

464 :params bucket_policy_only_enabled: 

465 (Optional) Whether the IAM-only policy is enabled for the bucket. 

466 

467 :type uniform_bucket_level_access_locked_time: :class:`datetime.datetime` 

468 :params uniform_bucket_level_locked_time: 

469 (Optional) When the bucket's IAM-only policy was enabled. 

470 This value should normally only be set by the back-end API. 

471 

472 :type bucket_policy_only_enabled: bool 

473 :params bucket_policy_only_enabled: 

474 Deprecated alias for :data:`uniform_bucket_level_access_enabled`. 

475 

476 :type bucket_policy_only_locked_time: :class:`datetime.datetime` 

477 :params bucket_policy_only_locked_time: 

478 Deprecated alias for :data:`uniform_bucket_level_access_locked_time`. 

479 """ 

480 

481 def __init__( 

482 self, 

483 bucket, 

484 public_access_prevention=_default, 

485 uniform_bucket_level_access_enabled=_default, 

486 uniform_bucket_level_access_locked_time=_default, 

487 bucket_policy_only_enabled=_default, 

488 bucket_policy_only_locked_time=_default, 

489 ): 

490 if bucket_policy_only_enabled is not _default: 

491 if uniform_bucket_level_access_enabled is not _default: 

492 raise ValueError(_UBLA_BPO_ENABLED_MESSAGE) 

493 

494 warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2) 

495 uniform_bucket_level_access_enabled = bucket_policy_only_enabled 

496 

497 if bucket_policy_only_locked_time is not _default: 

498 if uniform_bucket_level_access_locked_time is not _default: 

499 raise ValueError(_UBLA_BPO_LOCK_TIME_MESSAGE) 

500 

501 warnings.warn(_BPO_LOCK_TIME_MESSAGE, DeprecationWarning, stacklevel=2) 

502 uniform_bucket_level_access_locked_time = bucket_policy_only_locked_time 

503 

504 if uniform_bucket_level_access_enabled is _default: 

505 uniform_bucket_level_access_enabled = False 

506 

507 if public_access_prevention is _default: 

508 public_access_prevention = PUBLIC_ACCESS_PREVENTION_INHERITED 

509 

510 data = { 

511 "uniformBucketLevelAccess": { 

512 "enabled": uniform_bucket_level_access_enabled 

513 }, 

514 "publicAccessPrevention": public_access_prevention, 

515 } 

516 if uniform_bucket_level_access_locked_time is not _default: 

517 data["uniformBucketLevelAccess"]["lockedTime"] = _datetime_to_rfc3339( 

518 uniform_bucket_level_access_locked_time 

519 ) 

520 super(IAMConfiguration, self).__init__(data) 

521 self._bucket = bucket 

522 

523 @classmethod 

524 def from_api_repr(cls, resource, bucket): 

525 """Factory: construct instance from resource. 

526 

527 :type bucket: :class:`Bucket` 

528 :params bucket: Bucket for which this instance is the policy. 

529 

530 :type resource: dict 

531 :param resource: mapping as returned from API call. 

532 

533 :rtype: :class:`IAMConfiguration` 

534 :returns: Instance created from resource. 

535 """ 

536 instance = cls(bucket) 

537 instance.update(resource) 

538 return instance 

539 

540 @property 

541 def bucket(self): 

542 """Bucket for which this instance is the policy. 

543 

544 :rtype: :class:`Bucket` 

545 :returns: the instance's bucket. 

546 """ 

547 return self._bucket 

548 

549 @property 

550 def public_access_prevention(self): 

551 """Setting for public access prevention policy. Options are 'inherited' (default) or 'enforced'. 

552 

553 See: https://cloud.google.com/storage/docs/public-access-prevention 

554 

555 :rtype: string 

556 :returns: the public access prevention status, either 'enforced' or 'inherited'. 

557 """ 

558 return self["publicAccessPrevention"] 

559 

560 @public_access_prevention.setter 

561 def public_access_prevention(self, value): 

562 self["publicAccessPrevention"] = value 

563 self.bucket._patch_property("iamConfiguration", self) 

564 

565 @property 

566 def uniform_bucket_level_access_enabled(self): 

567 """If set, access checks only use bucket-level IAM policies or above. 

568 

569 :rtype: bool 

570 :returns: whether the bucket is configured to allow only IAM. 

571 """ 

572 ubla = self.get("uniformBucketLevelAccess", {}) 

573 return ubla.get("enabled", False) 

574 

575 @uniform_bucket_level_access_enabled.setter 

576 def uniform_bucket_level_access_enabled(self, value): 

577 ubla = self.setdefault("uniformBucketLevelAccess", {}) 

578 ubla["enabled"] = bool(value) 

579 self.bucket._patch_property("iamConfiguration", self) 

580 

581 @property 

582 def uniform_bucket_level_access_locked_time(self): 

583 """Deadline for changing :attr:`uniform_bucket_level_access_enabled` from true to false. 

584 

585 If the bucket's :attr:`uniform_bucket_level_access_enabled` is true, this property 

586 is time time after which that setting becomes immutable. 

587 

588 If the bucket's :attr:`uniform_bucket_level_access_enabled` is false, this property 

589 is ``None``. 

590 

591 :rtype: Union[:class:`datetime.datetime`, None] 

592 :returns: (readonly) Time after which :attr:`uniform_bucket_level_access_enabled` will 

593 be frozen as true. 

594 """ 

595 ubla = self.get("uniformBucketLevelAccess", {}) 

596 stamp = ubla.get("lockedTime") 

597 if stamp is not None: 

598 stamp = _rfc3339_nanos_to_datetime(stamp) 

599 return stamp 

600 

601 @property 

602 def bucket_policy_only_enabled(self): 

603 """Deprecated alias for :attr:`uniform_bucket_level_access_enabled`. 

604 

605 :rtype: bool 

606 :returns: whether the bucket is configured to allow only IAM. 

607 """ 

608 return self.uniform_bucket_level_access_enabled 

609 

610 @bucket_policy_only_enabled.setter 

611 def bucket_policy_only_enabled(self, value): 

612 warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2) 

613 self.uniform_bucket_level_access_enabled = value 

614 

615 @property 

616 def bucket_policy_only_locked_time(self): 

617 """Deprecated alias for :attr:`uniform_bucket_level_access_locked_time`. 

618 

619 :rtype: Union[:class:`datetime.datetime`, None] 

620 :returns: 

621 (readonly) Time after which :attr:`bucket_policy_only_enabled` will 

622 be frozen as true. 

623 """ 

624 return self.uniform_bucket_level_access_locked_time 

625 

626 

627class Bucket(_PropertyMixin): 

628 """A class representing a Bucket on Cloud Storage. 

629 

630 :type client: :class:`google.cloud.storage.client.Client` 

631 :param client: A client which holds credentials and project configuration 

632 for the bucket (which requires a project). 

633 

634 :type name: str 

635 :param name: The name of the bucket. Bucket names must start and end with a 

636 number or letter. 

637 

638 :type user_project: str 

639 :param user_project: (Optional) the project ID to be billed for API 

640 requests made via this instance. 

641 

642 :type generation: int 

643 :param generation: (Optional) If present, selects a specific revision of 

644 this bucket. 

645 """ 

646 

647 _MAX_OBJECTS_FOR_ITERATION = 256 

648 """Maximum number of existing objects allowed in iteration. 

649 

650 This is used in Bucket.delete() and Bucket.make_public(). 

651 """ 

652 

653 STORAGE_CLASSES = ( 

654 STANDARD_STORAGE_CLASS, 

655 NEARLINE_STORAGE_CLASS, 

656 COLDLINE_STORAGE_CLASS, 

657 ARCHIVE_STORAGE_CLASS, 

658 MULTI_REGIONAL_LEGACY_STORAGE_CLASS, # legacy 

659 REGIONAL_LEGACY_STORAGE_CLASS, # legacy 

660 DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS, # legacy 

661 ) 

662 """Allowed values for :attr:`storage_class`. 

663 

664 Default value is :attr:`STANDARD_STORAGE_CLASS`. 

665 

666 See 

667 https://cloud.google.com/storage/docs/json_api/v1/buckets#storageClass 

668 https://cloud.google.com/storage/docs/storage-classes 

669 """ 

670 

671 _LOCATION_TYPES = ( 

672 MULTI_REGION_LOCATION_TYPE, 

673 REGION_LOCATION_TYPE, 

674 DUAL_REGION_LOCATION_TYPE, 

675 ) 

676 """Allowed values for :attr:`location_type`.""" 

677 

678 def __init__(self, client, name=None, user_project=None, generation=None): 

679 """ 

680 property :attr:`name` 

681 Get the bucket's name. 

682 """ 

683 name = _validate_name(name) 

684 super(Bucket, self).__init__(name=name) 

685 self._client = client 

686 self._acl = BucketACL(self) 

687 self._default_object_acl = DefaultObjectACL(self) 

688 self._label_removals = set() 

689 self._user_project = user_project 

690 

691 if generation is not None: 

692 self._properties["generation"] = generation 

693 

694 def __repr__(self): 

695 return f"<Bucket: {self.name}>" 

696 

697 @property 

698 def client(self): 

699 """The client bound to this bucket.""" 

700 return self._client 

701 

702 def _set_properties(self, value): 

703 """Set the properties for the current object. 

704 

705 :type value: dict or :class:`google.cloud.storage.batch._FutureDict` 

706 :param value: The properties to be set. 

707 """ 

708 self._label_removals.clear() 

709 return super(Bucket, self)._set_properties(value) 

710 

711 @property 

712 def rpo(self): 

713 """Get the RPO (Recovery Point Objective) of this bucket 

714 

715 See: https://cloud.google.com/storage/docs/managing-turbo-replication 

716 

717 "ASYNC_TURBO" or "DEFAULT" 

718 :rtype: str 

719 """ 

720 return self._properties.get("rpo") 

721 

722 @rpo.setter 

723 def rpo(self, value): 

724 """ 

725 Set the RPO (Recovery Point Objective) of this bucket. 

726 

727 See: https://cloud.google.com/storage/docs/managing-turbo-replication 

728 

729 :type value: str 

730 :param value: "ASYNC_TURBO" or "DEFAULT" 

731 """ 

732 self._patch_property("rpo", value) 

733 

734 @property 

735 def user_project(self): 

736 """Project ID to be billed for API requests made via this bucket. 

737 

738 If unset, API requests are billed to the bucket owner. 

739 

740 A user project is required for all operations on Requester Pays buckets. 

741 

742 See https://cloud.google.com/storage/docs/requester-pays#requirements for details. 

743 

744 :rtype: str 

745 """ 

746 return self._user_project 

747 

748 @property 

749 def generation(self): 

750 """Retrieve the generation for the bucket. 

751 

752 :rtype: int or ``NoneType`` 

753 :returns: The generation of the bucket or ``None`` if the bucket's 

754 resource has not been loaded from the server. 

755 """ 

756 generation = self._properties.get("generation") 

757 if generation is not None: 

758 return int(generation) 

759 

760 @property 

761 def soft_delete_time(self): 

762 """If this bucket has been soft-deleted, returns the time at which it became soft-deleted. 

763 

764 :rtype: :class:`datetime.datetime` or ``NoneType`` 

765 :returns: 

766 (readonly) The time that the bucket became soft-deleted. 

767 Note this property is only set for soft-deleted buckets. 

768 """ 

769 soft_delete_time = self._properties.get("softDeleteTime") 

770 if soft_delete_time is not None: 

771 return _rfc3339_nanos_to_datetime(soft_delete_time) 

772 

773 @property 

774 def hard_delete_time(self): 

775 """If this bucket has been soft-deleted, returns the time at which it will be permanently deleted. 

776 

777 :rtype: :class:`datetime.datetime` or ``NoneType`` 

778 :returns: 

779 (readonly) The time that the bucket will be permanently deleted. 

780 Note this property is only set for soft-deleted buckets. 

781 """ 

782 hard_delete_time = self._properties.get("hardDeleteTime") 

783 if hard_delete_time is not None: 

784 return _rfc3339_nanos_to_datetime(hard_delete_time) 

785 

786 @property 

787 def _query_params(self): 

788 """Default query parameters.""" 

789 params = super()._query_params 

790 return params 

791 

792 @classmethod 

793 def from_uri(cls, uri, client=None): 

794 """Get a constructor for bucket object by URI. 

795 

796 .. code-block:: python 

797 

798 from google.cloud import storage 

799 from google.cloud.storage.bucket import Bucket 

800 client = storage.Client() 

801 bucket = Bucket.from_uri("gs://bucket", client=client) 

802 

803 :type uri: str 

804 :param uri: The bucket uri pass to get bucket object. 

805 

806 :type client: :class:`~google.cloud.storage.client.Client` or 

807 ``NoneType`` 

808 :param client: (Optional) The client to use. Application code should 

809 *always* pass ``client``. 

810 

811 :rtype: :class:`google.cloud.storage.bucket.Bucket` 

812 :returns: The bucket object created. 

813 """ 

814 scheme, netloc, path, query, frag = urlsplit(uri) 

815 

816 if scheme != "gs": 

817 raise ValueError("URI scheme must be gs") 

818 

819 return cls(client, name=netloc) 

820 

821 @classmethod 

822 def from_string(cls, uri, client=None): 

823 """Get a constructor for bucket object by URI. 

824 

825 .. note:: 

826 Deprecated alias for :meth:`from_uri`. 

827 

828 .. code-block:: python 

829 

830 from google.cloud import storage 

831 from google.cloud.storage.bucket import Bucket 

832 client = storage.Client() 

833 bucket = Bucket.from_string("gs://bucket", client=client) 

834 

835 :type uri: str 

836 :param uri: The bucket uri pass to get bucket object. 

837 

838 :type client: :class:`~google.cloud.storage.client.Client` or 

839 ``NoneType`` 

840 :param client: (Optional) The client to use. Application code should 

841 *always* pass ``client``. 

842 

843 :rtype: :class:`google.cloud.storage.bucket.Bucket` 

844 :returns: The bucket object created. 

845 """ 

846 warnings.warn(_FROM_STRING_MESSAGE, PendingDeprecationWarning, stacklevel=2) 

847 return Bucket.from_uri(uri=uri, client=client) 

848 

def blob(
    self,
    blob_name,
    chunk_size=None,
    encryption_key=None,
    kms_key_name=None,
    generation=None,
):
    """Factory constructor for blob object.

    .. note::
      This will not make an HTTP request; it simply instantiates
      a blob object owned by this bucket.

    :type blob_name: str
    :param blob_name: The name of the blob to be instantiated.

    :type chunk_size: int
    :param chunk_size: The size of a chunk of data whenever iterating
                       (in bytes). This must be a multiple of 256 KB per
                       the API specification.

    :type encryption_key: bytes
    :param encryption_key:
        (Optional) 32 byte encryption key for customer-supplied encryption.

    :type kms_key_name: str
    :param kms_key_name:
        (Optional) Resource name of KMS key used to encrypt blob's content.

    :type generation: long
    :param generation: (Optional) If present, selects a specific revision of
                       this object.

    :rtype: :class:`google.cloud.storage.blob.Blob`
    :returns: The blob object created.
    """
    # NOTE(review): a previous version of this docstring described a
    # ``crc32c_checksum`` parameter, but no such parameter exists in the
    # signature; the stale documentation has been removed.
    return Blob(
        name=blob_name,
        bucket=self,
        chunk_size=chunk_size,
        encryption_key=encryption_key,
        kms_key_name=kms_key_name,
        generation=generation,
    )

902 

def notification(
    self,
    topic_name=None,
    topic_project=None,
    custom_attributes=None,
    event_types=None,
    blob_name_prefix=None,
    payload_format=NONE_PAYLOAD_FORMAT,
    notification_id=None,
):
    """Factory: build a notification resource bound to this bucket.

    This only instantiates the resource locally; no API call is made.
    See: :class:`.BucketNotification` for the meaning of each parameter.

    :rtype: :class:`.BucketNotification`
    """
    # Collect the constructor arguments once so the delegation is explicit.
    notification_kwargs = {
        "topic_name": topic_name,
        "topic_project": topic_project,
        "custom_attributes": custom_attributes,
        "event_types": event_types,
        "blob_name_prefix": blob_name_prefix,
        "payload_format": payload_format,
        "notification_id": notification_id,
    }
    return BucketNotification(self, **notification_kwargs)

929 

def exists(
    self,
    client=None,
    timeout=_DEFAULT_TIMEOUT,
    if_etag_match=None,
    if_etag_not_match=None,
    if_metageneration_match=None,
    if_metageneration_not_match=None,
    retry=DEFAULT_RETRY,
):
    """Determines whether or not this bucket exists.

    If :attr:`user_project` is set, bills the API request to that project.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: (Optional) The client to use. If not passed, falls back
                   to the ``client`` stored on the current bucket.

    :type timeout: float or tuple
    :param timeout:
        (Optional) The amount of time, in seconds, to wait
        for the server response.  See: :ref:`configuring_timeouts`

    :type if_etag_match: Union[str, Set[str]]
    :param if_etag_match: (Optional) Make the operation conditional on whether the
                          bucket's current ETag matches the given value.

    :type if_etag_not_match: Union[str, Set[str]]
    :param if_etag_not_match: (Optional) Make the operation conditional on whether the
                              bucket's current ETag does not match the given value.

    :type if_metageneration_match: long
    :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                    bucket's current metageneration matches the given value.

    :type if_metageneration_not_match: long
    :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration does not match the given value.

    :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
    :param retry:
        (Optional) How to retry the RPC. See: :ref:`configuring_retries`

    :rtype: bool
    :returns: True if the bucket exists in Cloud Storage.
    """
    with create_trace_span(name="Storage.Bucket.exists"):
        client = self._require_client(client)
        # We only need the status code (200 or not) so we seek to
        # minimize the returned payload.
        query_params = {"fields": "name"}

        if self.user_project is not None:
            query_params["userProject"] = self.user_project

        _add_generation_match_parameters(
            query_params,
            if_metageneration_match=if_metageneration_match,
            if_metageneration_not_match=if_metageneration_not_match,
        )

        headers = {}
        _add_etag_match_headers(
            headers,
            if_etag_match=if_etag_match,
            if_etag_not_match=if_etag_not_match,
        )

        try:
            # We intentionally pass `_target_object=None` since fields=name
            # would limit the local properties.
            client._get_resource(
                self.path,
                query_params=query_params,
                headers=headers,
                timeout=timeout,
                retry=retry,
                _target_object=None,
            )
        except NotFound:
            # NOTE: This will not fail immediately in a batch. However, when
            # Batch.finish() is called, the resulting `NotFound` will be
            # raised.
            return False
        return True

1016 

def create(
    self,
    client=None,
    project=None,
    location=None,
    predefined_acl=None,
    predefined_default_object_acl=None,
    enable_object_retention=False,
    timeout=_DEFAULT_TIMEOUT,
    retry=DEFAULT_RETRY,
):
    """Create this bucket via "storage.buckets.insert".

    Raises :class:`google.cloud.exceptions.Conflict` if the bucket
    already exists.  If :attr:`user_project` is set, the API request is
    billed to that project.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: (Optional) The client to use. If not passed, falls back
                   to the ``client`` stored on the current bucket.

    :type project: str
    :param project: (Optional) Project under which to create the bucket;
                    defaults to the client's project.
    :raises ValueError: if ``project`` is None and the client's
                        :attr:`project` is also None.

    :type location: str
    :param location: (Optional) Bucket location; defaults to US. See
                     https://cloud.google.com/storage/docs/bucket-locations

    :type predefined_acl: str
    :param predefined_acl:
        (Optional) Name of predefined ACL to apply to the bucket. See:
        https://cloud.google.com/storage/docs/access-control/lists#predefined-acl

    :type predefined_default_object_acl: str
    :param predefined_default_object_acl:
        (Optional) Name of predefined ACL to apply to the bucket's objects. See:
        https://cloud.google.com/storage/docs/access-control/lists#predefined-acl

    :type enable_object_retention: bool
    :param enable_object_retention:
        (Optional) Whether object retention should be enabled on this bucket. See:
        https://cloud.google.com/storage/docs/object-lock

    :type timeout: float or tuple
    :param timeout:
        (Optional) The amount of time, in seconds, to wait
        for the server response.  See: :ref:`configuring_timeouts`

    :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
    :param retry:
        (Optional) How to retry the RPC. See: :ref:`configuring_retries`
    """
    with create_trace_span(name="Storage.Bucket.create"):
        # All of the heavy lifting lives on the client; this method simply
        # resolves the client and forwards its arguments.
        resolved_client = self._require_client(client)
        resolved_client.create_bucket(
            bucket_or_name=self,
            project=project,
            user_project=self.user_project,
            location=location,
            predefined_acl=predefined_acl,
            predefined_default_object_acl=predefined_default_object_acl,
            enable_object_retention=enable_object_retention,
            timeout=timeout,
            retry=retry,
        )

1091 

def update(
    self,
    client=None,
    timeout=_DEFAULT_TIMEOUT,
    if_metageneration_match=None,
    if_metageneration_not_match=None,
    retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
):
    """Sends all properties in a PUT request.

    Updates the ``_properties`` with the response from the backend.

    If :attr:`user_project` is set, bills the API request to that project.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: the client to use.  If not passed, falls back to the
                   ``client`` stored on the current object.

    :type timeout: float or tuple
    :param timeout:
        (Optional) The amount of time, in seconds, to wait
        for the server response.  See: :ref:`configuring_timeouts`

    :type if_metageneration_match: long
    :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                    bucket's current metageneration matches the given value.

    :type if_metageneration_not_match: long
    :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration does not match the given value.

    :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
    :param retry:
        (Optional) How to retry the RPC. See: :ref:`configuring_retries`
    """
    # Fix: the docstring previously said "blob's current metageneration"
    # for both preconditions, but this method operates on a bucket.
    with create_trace_span(name="Storage.Bucket.update"):
        super(Bucket, self).update(
            client=client,
            timeout=timeout,
            if_metageneration_match=if_metageneration_match,
            if_metageneration_not_match=if_metageneration_not_match,
            retry=retry,
        )

1136 

def reload(
    self,
    client=None,
    projection="noAcl",
    timeout=_DEFAULT_TIMEOUT,
    if_etag_match=None,
    if_etag_not_match=None,
    if_metageneration_match=None,
    if_metageneration_not_match=None,
    retry=DEFAULT_RETRY,
    soft_deleted=None,
):
    """Refresh this bucket's properties from Cloud Storage.

    If :attr:`user_project` is set, the API request is billed to that
    project.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: the client to use; falls back to the ``client`` stored
                   on the current object when not passed.

    :type projection: str
    :param projection: (Optional) ``'full'`` or ``'noAcl'`` (the default);
                       selects which set of properties to return.

    :type timeout: float or tuple
    :param timeout:
        (Optional) Seconds to wait for the server response.
        See: :ref:`configuring_timeouts`

    :type if_etag_match: Union[str, Set[str]]
    :param if_etag_match: (Optional) Only perform the operation if the
                          bucket's current ETag matches the given value.

    :type if_etag_not_match: Union[str, Set[str]]
    :param if_etag_not_match: (Optional) Only perform the operation if the
                              bucket's current ETag does not match the given value.

    :type if_metageneration_match: long
    :param if_metageneration_match: (Optional) Only perform the operation if the
                                    bucket's current metageneration matches the given value.

    :type if_metageneration_not_match: long
    :param if_metageneration_not_match: (Optional) Only perform the operation if the
                                        bucket's current metageneration does not match the given value.

    :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
    :param retry:
        (Optional) How to retry the RPC. See: :ref:`configuring_retries`

    :type soft_deleted: bool
    :param soft_deleted: (Optional) If True, looks for a soft-deleted
        bucket; metadata is only returned when the bucket exists in a
        soft-deleted state, and ``generation`` must be set.
        See: https://cloud.google.com/storage/docs/soft-delete
    """
    with create_trace_span(name="Storage.Bucket.reload"):
        # Gather the keyword arguments once and hand them to the shared
        # _PropertyMixin implementation.
        reload_kwargs = {
            "client": client,
            "projection": projection,
            "timeout": timeout,
            "if_etag_match": if_etag_match,
            "if_etag_not_match": if_etag_not_match,
            "if_metageneration_match": if_metageneration_match,
            "if_metageneration_not_match": if_metageneration_not_match,
            "retry": retry,
            "soft_deleted": soft_deleted,
        }
        super().reload(**reload_kwargs)

1207 

def patch(
    self,
    client=None,
    timeout=_DEFAULT_TIMEOUT,
    if_metageneration_match=None,
    if_metageneration_not_match=None,
    retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
):
    """Sends all changed properties in a PATCH request.

    Updates the ``_properties`` with the response from the backend.

    If :attr:`user_project` is set, bills the API request to that project.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: the client to use.  If not passed, falls back to the
                   ``client`` stored on the current object.

    :type timeout: float or tuple
    :param timeout:
        (Optional) The amount of time, in seconds, to wait
        for the server response.  See: :ref:`configuring_timeouts`

    :type if_metageneration_match: long
    :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                    bucket's current metageneration matches the given value.

    :type if_metageneration_not_match: long
    :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration does not match the given value.

    :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
    :param retry:
        (Optional) How to retry the RPC. See: :ref:`configuring_retries`
    """
    # Fix: the docstring previously said "blob's current metageneration"
    # for both preconditions, but this method operates on a bucket.
    with create_trace_span(name="Storage.Bucket.patch"):
        # Special case: For buckets, it is possible that labels are being
        # removed; this requires special handling: the backend drops a
        # label when it is PATCHed with an explicit None value.
        if self._label_removals:
            self._changes.add("labels")
            self._properties.setdefault("labels", {})
            for removed_label in self._label_removals:
                self._properties["labels"][removed_label] = None

        # Call the superclass method.
        super(Bucket, self).patch(
            client=client,
            if_metageneration_match=if_metageneration_match,
            if_metageneration_not_match=if_metageneration_not_match,
            timeout=timeout,
            retry=retry,
        )

1261 

@property
def acl(self):
    """This bucket's ACL wrapper (already instantiated on the object)."""
    return self._acl

1266 

@property
def default_object_acl(self):
    """This bucket's defaultObjectACL wrapper (already instantiated on the object)."""
    return self._default_object_acl

1271 

1272 @staticmethod 

1273 def path_helper(bucket_name): 

1274 """Relative URL path for a bucket. 

1275 

1276 :type bucket_name: str 

1277 :param bucket_name: The bucket name in the path. 

1278 

1279 :rtype: str 

1280 :returns: The relative URL path for ``bucket_name``. 

1281 """ 

1282 return "/b/" + bucket_name 

1283 

@property
def path(self):
    """The URL path to this bucket.

    :raises ValueError: if the bucket has no name set.
    """
    if self.name:
        return self.path_helper(self.name)
    raise ValueError("Cannot determine path without bucket name.")

1291 

def get_blob(
    self,
    blob_name,
    client=None,
    encryption_key=None,
    generation=None,
    if_etag_match=None,
    if_etag_not_match=None,
    if_generation_match=None,
    if_generation_not_match=None,
    if_metageneration_match=None,
    if_metageneration_not_match=None,
    timeout=_DEFAULT_TIMEOUT,
    retry=DEFAULT_RETRY,
    soft_deleted=None,
    **kwargs,
):
    """Fetch a blob's metadata by name, returning ``None`` when absent.

    See a [code sample](https://cloud.google.com/storage/docs/samples/storage-get-metadata#storage_get_metadata-python)
    on how to retrieve metadata of an object.

    If :attr:`user_project` is set, bills the API request to that project.

    :type blob_name: str
    :param blob_name: The name of the blob to retrieve.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: (Optional) The client to use; falls back to the
                   ``client`` stored on the current bucket.

    :type encryption_key: bytes
    :param encryption_key:
        (Optional) 32 byte encryption key for customer-supplied encryption.
        See
        https://cloud.google.com/storage/docs/encryption#customer-supplied.

    :type generation: long
    :param generation:
        (Optional) If present, selects a specific revision of this object.

    :type if_etag_match: Union[str, Set[str]]
    :param if_etag_match: (Optional) See :ref:`using-if-etag-match`

    :type if_etag_not_match: Union[str, Set[str]]
    :param if_etag_not_match: (Optional) See :ref:`using-if-etag-not-match`

    :type if_generation_match: long
    :param if_generation_match: (Optional) See :ref:`using-if-generation-match`

    :type if_generation_not_match: long
    :param if_generation_not_match: (Optional) See :ref:`using-if-generation-not-match`

    :type if_metageneration_match: long
    :param if_metageneration_match: (Optional) See :ref:`using-if-metageneration-match`

    :type if_metageneration_not_match: long
    :param if_metageneration_not_match: (Optional) See :ref:`using-if-metageneration-not-match`

    :type timeout: float or tuple
    :param timeout:
        (Optional) Seconds to wait for the server response.
        See: :ref:`configuring_timeouts`

    :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
    :param retry:
        (Optional) How to retry the RPC. See: :ref:`configuring_retries`

    :type soft_deleted: bool
    :param soft_deleted:
        (Optional) If True, looks for a soft-deleted object. Metadata is
        only returned when the object exists in a soft-deleted state, and
        ``generation`` is required.
        See: https://cloud.google.com/storage/docs/soft-delete

    :param kwargs: Keyword arguments forwarded to the
                   :class:`~google.cloud.storage.blob.Blob` constructor.

    :rtype: :class:`google.cloud.storage.blob.Blob` or None
    :returns: The blob object if it exists, otherwise None.
    """
    with create_trace_span(name="Storage.Bucket.getBlob"):
        candidate = Blob(
            bucket=self,
            name=blob_name,
            encryption_key=encryption_key,
            generation=generation,
            **kwargs,
        )
        try:
            # NOTE: This will not fail immediately in a batch. However, when
            # Batch.finish() is called, the resulting `NotFound` will be
            # raised.
            candidate.reload(
                client=client,
                timeout=timeout,
                if_etag_match=if_etag_match,
                if_etag_not_match=if_etag_not_match,
                if_generation_match=if_generation_match,
                if_generation_not_match=if_generation_not_match,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                retry=retry,
                soft_deleted=soft_deleted,
            )
        except NotFound:
            return None
        return candidate

1408 

def list_blobs(
    self,
    max_results=None,
    page_token=None,
    prefix=None,
    delimiter=None,
    start_offset=None,
    end_offset=None,
    include_trailing_delimiter=None,
    versions=None,
    projection="noAcl",
    fields=None,
    client=None,
    timeout=_DEFAULT_TIMEOUT,
    retry=DEFAULT_RETRY,
    match_glob=None,
    include_folders_as_prefixes=None,
    soft_deleted=None,
    page_size=None,
):
    """Return an iterator used to find blobs in the bucket.

    If :attr:`user_project` is set, bills the API request to that project.

    :type max_results: int
    :param max_results:
        (Optional) The maximum number of blobs to return.

    :type page_token: str
    :param page_token:
        (Optional) If present, return the next batch of blobs, using the
        value, which must correspond to the ``nextPageToken`` value
        returned in the previous response.  Deprecated: use the ``pages``
        property of the returned iterator instead of manually passing the
        token.

    :type prefix: str
    :param prefix: (Optional) Prefix used to filter blobs.

    :type delimiter: str
    :param delimiter: (Optional) Delimiter, used with ``prefix`` to
                      emulate hierarchy.

    :type start_offset: str
    :param start_offset:
        (Optional) Filter results to objects whose names are
        lexicographically equal to or after ``startOffset``. If
        ``endOffset`` is also set, the objects listed will have names
        between ``startOffset`` (inclusive) and ``endOffset`` (exclusive).

    :type end_offset: str
    :param end_offset:
        (Optional) Filter results to objects whose names are
        lexicographically before ``endOffset``. If ``startOffset`` is also
        set, the objects listed will have names between ``startOffset``
        (inclusive) and ``endOffset`` (exclusive).

    :type include_trailing_delimiter: boolean
    :param include_trailing_delimiter:
        (Optional) If true, objects that end in exactly one instance of
        ``delimiter`` will have their metadata included in ``items`` in
        addition to ``prefixes``.

    :type versions: bool
    :param versions: (Optional) Whether object versions should be returned
                     as separate blobs.

    :type projection: str
    :param projection: (Optional) If used, must be 'full' or 'noAcl'.
                       Defaults to ``'noAcl'``. Specifies the set of
                       properties to return.

    :type fields: str
    :param fields:
        (Optional) Selector specifying which fields to include
        in a partial response. Must be a list of fields. For
        example to get a partial response with just the next
        page token and the name and language of each blob returned:
        ``'items(name,contentLanguage),nextPageToken'``.
        See: https://cloud.google.com/storage/docs/json_api/v1/parameters#fields

    :type client: :class:`~google.cloud.storage.client.Client`
    :param client: (Optional) The client to use.  If not passed, falls back
                   to the ``client`` stored on the current bucket.

    :type timeout: float or tuple
    :param timeout:
        (Optional) The amount of time, in seconds, to wait
        for the server response.  See: :ref:`configuring_timeouts`

    :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
    :param retry:
        (Optional) How to retry the RPC. See: :ref:`configuring_retries`

    :type match_glob: str
    :param match_glob:
        (Optional) A glob pattern used to filter results (for example, foo*bar).
        The string value must be UTF-8 encoded. See:
        https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-object-glob

    :type include_folders_as_prefixes: bool
    :param include_folders_as_prefixes:
        (Optional) If true, includes Folders and Managed Folders in the set of
        ``prefixes`` returned by the query. Only applicable if ``delimiter`` is set to /.
        See: https://cloud.google.com/storage/docs/managed-folders

    :type soft_deleted: bool
    :param soft_deleted:
        (Optional) If true, only soft-deleted objects will be listed as distinct results in order of increasing
        generation number. This parameter can only be used successfully if the bucket has a soft delete policy.
        Note ``soft_deleted`` and ``versions`` cannot be set to True simultaneously. See:
        https://cloud.google.com/storage/docs/soft-delete

    :type page_size: int
    :param page_size:
        (Optional) Maximum number of blobs to return in each page.
        Defaults to a value set by the API.

    :rtype: :class:`~google.api_core.page_iterator.Iterator`
    :returns: Iterator of all :class:`~google.cloud.storage.blob.Blob`
              in this bucket matching the arguments.
    """
    # Fix: the ``include_folders_as_prefixes`` docstring entry was missing
    # its ``:param`` line, so the description never rendered as parameter
    # documentation.
    with create_trace_span(name="Storage.Bucket.listBlobs"):
        client = self._require_client(client)
        return client.list_blobs(
            self,
            max_results=max_results,
            page_token=page_token,
            prefix=prefix,
            delimiter=delimiter,
            start_offset=start_offset,
            end_offset=end_offset,
            include_trailing_delimiter=include_trailing_delimiter,
            versions=versions,
            projection=projection,
            fields=fields,
            page_size=page_size,
            timeout=timeout,
            retry=retry,
            match_glob=match_glob,
            include_folders_as_prefixes=include_folders_as_prefixes,
            soft_deleted=soft_deleted,
        )

1551 

def list_notifications(
    self, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY
):
    """List the Pub/Sub notification configurations defined on this bucket.

    See:
    https://cloud.google.com/storage/docs/json_api/v1/notifications/list

    If :attr:`user_project` is set, bills the API request to that project.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: (Optional) The client to use; falls back to the
                   ``client`` stored on the current bucket.
    :type timeout: float or tuple
    :param timeout:
        (Optional) Seconds to wait for the server response.
        See: :ref:`configuring_timeouts`

    :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
    :param retry:
        (Optional) How to retry the RPC. See: :ref:`configuring_retries`

    :rtype: list of :class:`.BucketNotification`
    :returns: notification instances
    """
    with create_trace_span(name="Storage.Bucket.listNotifications"):
        resolved_client = self._require_client(client)
        result_iterator = resolved_client._list_resource(
            self.path + "/notificationConfigs",
            _item_to_notification,
            timeout=timeout,
            retry=retry,
        )
        # The item converter needs the owning bucket to build notifications.
        result_iterator.bucket = self
        return result_iterator

1589 

def get_notification(
    self,
    notification_id,
    client=None,
    timeout=_DEFAULT_TIMEOUT,
    retry=DEFAULT_RETRY,
):
    """Fetch one Pub/Sub notification configuration for this bucket.

    See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/notifications/get)
    and a [code sample](https://cloud.google.com/storage/docs/samples/storage-print-pubsub-bucket-notification#storage_print_pubsub_bucket_notification-python).

    If :attr:`user_project` is set, bills the API request to that project.

    :type notification_id: str
    :param notification_id: The notification id to retrieve the notification configuration.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: (Optional) The client to use; falls back to the
                   ``client`` stored on the current bucket.
    :type timeout: float or tuple
    :param timeout:
        (Optional) Seconds to wait for the server response.
        See: :ref:`configuring_timeouts`

    :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
    :param retry:
        (Optional) How to retry the RPC. See: :ref:`configuring_retries`

    :rtype: :class:`.BucketNotification`
    :returns: notification instance.
    """
    with create_trace_span(name="Storage.Bucket.getNotification"):
        # Build a local stub, then populate it from the backend.
        result = self.notification(notification_id=notification_id)
        result.reload(client=client, timeout=timeout, retry=retry)
        return result

1627 

def delete(
    self,
    force=False,
    client=None,
    if_metageneration_match=None,
    if_metageneration_not_match=None,
    timeout=_DEFAULT_TIMEOUT,
    retry=DEFAULT_RETRY,
):
    """Delete this bucket.

    The bucket **must** be empty in order to submit a delete request. If
    ``force=True`` is passed, this will first attempt to delete all the
    objects / blobs in the bucket (i.e. try to empty the bucket).

    If the bucket doesn't exist, this will raise
    :class:`google.cloud.exceptions.NotFound`.  If the bucket is not empty
    (and ``force=False``), will raise :class:`google.cloud.exceptions.Conflict`.

    If ``force=True`` and the bucket contains more than 256 objects / blobs
    this will cowardly refuse to delete the objects (or the bucket). This
    is to prevent accidental bucket deletion and to prevent extremely long
    runtime of this method.  Also note that ``force=True`` is not supported
    in a ``Batch`` context.

    If :attr:`user_project` is set, bills the API request to that project.

    :type force: bool
    :param force: If True, empties the bucket's objects then deletes it.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: (Optional) The client to use; falls back to the
                   ``client`` stored on the current bucket.

    :type if_metageneration_match: long
    :param if_metageneration_match: (Optional) Only delete if the bucket's
                                    current metageneration matches the given value.

    :type if_metageneration_not_match: long
    :param if_metageneration_not_match: (Optional) Only delete if the bucket's
                                        current metageneration does not match the given value.

    :type timeout: float or tuple
    :param timeout:
        (Optional) Seconds to wait for the server response.
        See: :ref:`configuring_timeouts`

    :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
    :param retry:
        (Optional) How to retry the RPC. See: :ref:`configuring_retries`

    :raises: :class:`ValueError` if ``force`` is ``True`` and the bucket
             contains more than 256 objects / blobs.
    """
    with create_trace_span(name="Storage.Bucket.delete"):
        client = self._require_client(client)

        query_params = {}
        if self.user_project is not None:
            query_params["userProject"] = self.user_project
        _add_generation_match_parameters(
            query_params,
            if_metageneration_match=if_metageneration_match,
            if_metageneration_not_match=if_metageneration_not_match,
        )

        if force:
            # Fetch one more than the limit so we can detect overflow.
            found_blobs = list(
                self.list_blobs(
                    max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
                    client=client,
                    timeout=timeout,
                    retry=retry,
                    versions=True,
                )
            )
            if len(found_blobs) > self._MAX_OBJECTS_FOR_ITERATION:
                raise ValueError(
                    "Refusing to delete bucket with more than "
                    "%d objects. If you actually want to delete "
                    "this bucket, please delete the objects "
                    "yourself before calling Bucket.delete()."
                    % (self._MAX_OBJECTS_FOR_ITERATION,)
                )

            # Ignore 404 errors on delete.
            self.delete_blobs(
                found_blobs,
                on_error=lambda blob: None,
                client=client,
                timeout=timeout,
                retry=retry,
                preserve_generation=True,
            )

        # We intentionally pass `_target_object=None` since a DELETE
        # request has no response value (whether in a standard request or
        # in a batch request).
        client._delete_resource(
            self.path,
            query_params=query_params,
            timeout=timeout,
            retry=retry,
            _target_object=None,
        )

1734 

1735 def delete_blob( 

1736 self, 

1737 blob_name, 

1738 client=None, 

1739 generation=None, 

1740 if_generation_match=None, 

1741 if_generation_not_match=None, 

1742 if_metageneration_match=None, 

1743 if_metageneration_not_match=None, 

1744 timeout=_DEFAULT_TIMEOUT, 

1745 retry=DEFAULT_RETRY, 

1746 ): 

1747 """Deletes a blob from the current bucket. 

1748 

1749 If :attr:`user_project` is set, bills the API request to that project. 

1750 

1751 :type blob_name: str 

1752 :param blob_name: A blob name to delete. 

1753 

1754 :type client: :class:`~google.cloud.storage.client.Client` or 

1755 ``NoneType`` 

1756 :param client: (Optional) The client to use. If not passed, falls back 

1757 to the ``client`` stored on the current bucket. 

1758 

1759 :type generation: long 

1760 :param generation: (Optional) If present, permanently deletes a specific 

1761 revision of this object. 

1762 

1763 :type if_generation_match: long 

1764 :param if_generation_match: 

1765 (Optional) See :ref:`using-if-generation-match` 

1766 

1767 :type if_generation_not_match: long 

1768 :param if_generation_not_match: 

1769 (Optional) See :ref:`using-if-generation-not-match` 

1770 

1771 :type if_metageneration_match: long 

1772 :param if_metageneration_match: 

1773 (Optional) See :ref:`using-if-metageneration-match` 

1774 

1775 :type if_metageneration_not_match: long 

1776 :param if_metageneration_not_match: 

1777 (Optional) See :ref:`using-if-metageneration-not-match` 

1778 

1779 :type timeout: float or tuple 

1780 :param timeout: 

1781 (Optional) The amount of time, in seconds, to wait 

1782 for the server response. See: :ref:`configuring_timeouts` 

1783 

1784 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1785 :param retry: (Optional) How to retry the RPC. A None value will disable 

1786 retries. A google.api_core.retry.Retry value will enable retries, 

1787 and the object will define retriable response codes and errors and 

1788 configure backoff and timeout options. 

1789 

1790 A google.cloud.storage.retry.ConditionalRetryPolicy value wraps a 

1791 Retry object and activates it only if certain conditions are met. 

1792 This class exists to provide safe defaults for RPC calls that are 

1793 not technically safe to retry normally (due to potential data 

1794 duplication or other side-effects) but become safe to retry if a 

1795 condition such as if_generation_match is set. 

1796 

1797 See the retry.py source code and docstrings in this package 

1798 (google.cloud.storage.retry) for information on retry types and how 

1799 to configure them. 

1800 

1801 :raises: :class:`google.cloud.exceptions.NotFound` Raises a NotFound 

1802 if the blob isn't found. To suppress 

1803 the exception, use :meth:`delete_blobs` by passing a no-op 

1804 ``on_error`` callback. 

1805 """ 

1806 with create_trace_span(name="Storage.Bucket.deleteBlob"): 

1807 client = self._require_client(client) 

1808 blob = Blob(blob_name, bucket=self, generation=generation) 

1809 

1810 query_params = copy.deepcopy(blob._query_params) 

1811 _add_generation_match_parameters( 

1812 query_params, 

1813 if_generation_match=if_generation_match, 

1814 if_generation_not_match=if_generation_not_match, 

1815 if_metageneration_match=if_metageneration_match, 

1816 if_metageneration_not_match=if_metageneration_not_match, 

1817 ) 

1818 # We intentionally pass `_target_object=None` since a DELETE 

1819 # request has no response value (whether in a standard request or 

1820 # in a batch request). 

1821 client._delete_resource( 

1822 blob.path, 

1823 query_params=query_params, 

1824 timeout=timeout, 

1825 retry=retry, 

1826 _target_object=None, 

1827 ) 

1828 

1829 def delete_blobs( 

1830 self, 

1831 blobs, 

1832 on_error=None, 

1833 client=None, 

1834 preserve_generation=False, 

1835 timeout=_DEFAULT_TIMEOUT, 

1836 if_generation_match=None, 

1837 if_generation_not_match=None, 

1838 if_metageneration_match=None, 

1839 if_metageneration_not_match=None, 

1840 retry=DEFAULT_RETRY, 

1841 ): 

1842 """Deletes a list of blobs from the current bucket. 

1843 

1844 Uses :meth:`delete_blob` to delete each individual blob. 

1845 

1846 By default, any generation information in the list of blobs is ignored, and the 

1847 live versions of all blobs are deleted. Set `preserve_generation` to True 

1848 if blob generation should instead be propagated from the list of blobs. 

1849 

1850 If :attr:`user_project` is set, bills the API request to that project. 

1851 

1852 :type blobs: list 

1853 :param blobs: A list of :class:`~google.cloud.storage.blob.Blob`-s or 

1854 blob names to delete. 

1855 

1856 :type on_error: callable 

1857 :param on_error: (Optional) Takes single argument: ``blob``. 

1858 Called once for each blob raising 

1859 :class:`~google.cloud.exceptions.NotFound`; 

1860 otherwise, the exception is propagated. 

1861 Note that ``on_error`` is not supported in a ``Batch`` context. 

1862 

1863 :type client: :class:`~google.cloud.storage.client.Client` 

1864 :param client: (Optional) The client to use. If not passed, falls back 

1865 to the ``client`` stored on the current bucket. 

1866 

1867 :type preserve_generation: bool 

1868 :param preserve_generation: (Optional) Deletes only the generation specified on the blob object, 

1869 instead of the live version, if set to True. Only :class:~google.cloud.storage.blob.Blob 

1870 objects can have their generation set in this way. 

1871 Default: False. 

1872 

1873 :type if_generation_match: list of long 

1874 :param if_generation_match: 

1875 (Optional) See :ref:`using-if-generation-match` 

1876 Note that the length of the list must match the length of 

1877 The list must match ``blobs`` item-to-item. 

1878 

1879 :type if_generation_not_match: list of long 

1880 :param if_generation_not_match: 

1881 (Optional) See :ref:`using-if-generation-not-match` 

1882 The list must match ``blobs`` item-to-item. 

1883 

1884 :type if_metageneration_match: list of long 

1885 :param if_metageneration_match: 

1886 (Optional) See :ref:`using-if-metageneration-match` 

1887 The list must match ``blobs`` item-to-item. 

1888 

1889 :type if_metageneration_not_match: list of long 

1890 :param if_metageneration_not_match: 

1891 (Optional) See :ref:`using-if-metageneration-not-match` 

1892 The list must match ``blobs`` item-to-item. 

1893 

1894 :type timeout: float or tuple 

1895 :param timeout: 

1896 (Optional) The amount of time, in seconds, to wait 

1897 for the server response. See: :ref:`configuring_timeouts` 

1898 

1899 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1900 :param retry: (Optional) How to retry the RPC. A None value will disable 

1901 retries. A google.api_core.retry.Retry value will enable retries, 

1902 and the object will define retriable response codes and errors and 

1903 configure backoff and timeout options. 

1904 

1905 A google.cloud.storage.retry.ConditionalRetryPolicy value wraps a 

1906 Retry object and activates it only if certain conditions are met. 

1907 This class exists to provide safe defaults for RPC calls that are 

1908 not technically safe to retry normally (due to potential data 

1909 duplication or other side-effects) but become safe to retry if a 

1910 condition such as if_generation_match is set. 

1911 

1912 See the retry.py source code and docstrings in this package 

1913 (google.cloud.storage.retry) for information on retry types and how 

1914 to configure them. 

1915 

1916 :raises: :class:`~google.cloud.exceptions.NotFound` (if 

1917 `on_error` is not passed). 

1918 """ 

1919 with create_trace_span(name="Storage.Bucket.deleteBlobs"): 

1920 _raise_if_len_differs( 

1921 len(blobs), 

1922 if_generation_match=if_generation_match, 

1923 if_generation_not_match=if_generation_not_match, 

1924 if_metageneration_match=if_metageneration_match, 

1925 if_metageneration_not_match=if_metageneration_not_match, 

1926 ) 

1927 if_generation_match = iter(if_generation_match or []) 

1928 if_generation_not_match = iter(if_generation_not_match or []) 

1929 if_metageneration_match = iter(if_metageneration_match or []) 

1930 if_metageneration_not_match = iter(if_metageneration_not_match or []) 

1931 

1932 for blob in blobs: 

1933 try: 

1934 blob_name = blob 

1935 generation = None 

1936 if not isinstance(blob_name, str): 

1937 blob_name = blob.name 

1938 generation = blob.generation if preserve_generation else None 

1939 

1940 self.delete_blob( 

1941 blob_name, 

1942 client=client, 

1943 generation=generation, 

1944 if_generation_match=next(if_generation_match, None), 

1945 if_generation_not_match=next(if_generation_not_match, None), 

1946 if_metageneration_match=next(if_metageneration_match, None), 

1947 if_metageneration_not_match=next( 

1948 if_metageneration_not_match, None 

1949 ), 

1950 timeout=timeout, 

1951 retry=retry, 

1952 ) 

1953 except NotFound: 

1954 if on_error is not None: 

1955 on_error(blob) 

1956 else: 

1957 raise 

1958 

1959 def copy_blob( 

1960 self, 

1961 blob, 

1962 destination_bucket, 

1963 new_name=None, 

1964 client=None, 

1965 preserve_acl=True, 

1966 source_generation=None, 

1967 if_generation_match=None, 

1968 if_generation_not_match=None, 

1969 if_metageneration_match=None, 

1970 if_metageneration_not_match=None, 

1971 if_source_generation_match=None, 

1972 if_source_generation_not_match=None, 

1973 if_source_metageneration_match=None, 

1974 if_source_metageneration_not_match=None, 

1975 timeout=_DEFAULT_TIMEOUT, 

1976 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, 

1977 ): 

1978 """Copy the given blob to the given bucket, optionally with a new name. 

1979 

1980 If :attr:`user_project` is set, bills the API request to that project. 

1981 

1982 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/objects/copy) 

1983 and a [code sample](https://cloud.google.com/storage/docs/samples/storage-copy-file#storage_copy_file-python). 

1984 

1985 :type blob: :class:`google.cloud.storage.blob.Blob` 

1986 :param blob: The blob to be copied. 

1987 

1988 :type destination_bucket: :class:`google.cloud.storage.bucket.Bucket` 

1989 :param destination_bucket: The bucket into which the blob should be 

1990 copied. 

1991 

1992 :type new_name: str 

1993 :param new_name: (Optional) The new name for the copied file. 

1994 

1995 :type client: :class:`~google.cloud.storage.client.Client` or 

1996 ``NoneType`` 

1997 :param client: (Optional) The client to use. If not passed, falls back 

1998 to the ``client`` stored on the current bucket. 

1999 

2000 :type preserve_acl: bool 

2001 :param preserve_acl: DEPRECATED. This argument is not functional! 

2002 (Optional) Copies ACL from old blob to new blob. 

2003 Default: True. 

2004 Note that ``preserve_acl`` is not supported in a 

2005 ``Batch`` context. 

2006 

2007 :type source_generation: long 

2008 :param source_generation: (Optional) The generation of the blob to be 

2009 copied. 

2010 

2011 :type if_generation_match: long 

2012 :param if_generation_match: 

2013 (Optional) See :ref:`using-if-generation-match` 

2014 Note that the generation to be matched is that of the 

2015 ``destination`` blob. 

2016 

2017 :type if_generation_not_match: long 

2018 :param if_generation_not_match: 

2019 (Optional) See :ref:`using-if-generation-not-match` 

2020 Note that the generation to be matched is that of the 

2021 ``destination`` blob. 

2022 

2023 :type if_metageneration_match: long 

2024 :param if_metageneration_match: 

2025 (Optional) See :ref:`using-if-metageneration-match` 

2026 Note that the metageneration to be matched is that of the 

2027 ``destination`` blob. 

2028 

2029 :type if_metageneration_not_match: long 

2030 :param if_metageneration_not_match: 

2031 (Optional) See :ref:`using-if-metageneration-not-match` 

2032 Note that the metageneration to be matched is that of the 

2033 ``destination`` blob. 

2034 

2035 :type if_source_generation_match: long 

2036 :param if_source_generation_match: 

2037 (Optional) Makes the operation conditional on whether the source 

2038 object's generation matches the given value. 

2039 

2040 :type if_source_generation_not_match: long 

2041 :param if_source_generation_not_match: 

2042 (Optional) Makes the operation conditional on whether the source 

2043 object's generation does not match the given value. 

2044 

2045 :type if_source_metageneration_match: long 

2046 :param if_source_metageneration_match: 

2047 (Optional) Makes the operation conditional on whether the source 

2048 object's current metageneration matches the given value. 

2049 

2050 :type if_source_metageneration_not_match: long 

2051 :param if_source_metageneration_not_match: 

2052 (Optional) Makes the operation conditional on whether the source 

2053 object's current metageneration does not match the given value. 

2054 

2055 :type timeout: float or tuple 

2056 :param timeout: 

2057 (Optional) The amount of time, in seconds, to wait 

2058 for the server response. See: :ref:`configuring_timeouts` 

2059 

2060 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

2061 :param retry: 

2062 (Optional) How to retry the RPC. 

2063 The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, a conditional retry 

2064 policy which will only enable retries if ``if_generation_match`` or ``generation`` 

2065 is set, in order to ensure requests are idempotent before retrying them. 

2066 Change the value to ``DEFAULT_RETRY`` or another `google.api_core.retry.Retry` object 

2067 to enable retries regardless of generation precondition setting. 

2068 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout). 

2069 

2070 :rtype: :class:`google.cloud.storage.blob.Blob` 

2071 :returns: The new Blob. 

2072 """ 

2073 with create_trace_span(name="Storage.Bucket.copyBlob"): 

2074 client = self._require_client(client) 

2075 query_params = {} 

2076 

2077 if self.user_project is not None: 

2078 query_params["userProject"] = self.user_project 

2079 

2080 if source_generation is not None: 

2081 query_params["sourceGeneration"] = source_generation 

2082 

2083 _add_generation_match_parameters( 

2084 query_params, 

2085 if_generation_match=if_generation_match, 

2086 if_generation_not_match=if_generation_not_match, 

2087 if_metageneration_match=if_metageneration_match, 

2088 if_metageneration_not_match=if_metageneration_not_match, 

2089 if_source_generation_match=if_source_generation_match, 

2090 if_source_generation_not_match=if_source_generation_not_match, 

2091 if_source_metageneration_match=if_source_metageneration_match, 

2092 if_source_metageneration_not_match=if_source_metageneration_not_match, 

2093 ) 

2094 

2095 if new_name is None: 

2096 new_name = blob.name 

2097 

2098 new_blob = Blob(bucket=destination_bucket, name=new_name) 

2099 api_path = blob.path + "/copyTo" + new_blob.path 

2100 copy_result = client._post_resource( 

2101 api_path, 

2102 None, 

2103 query_params=query_params, 

2104 timeout=timeout, 

2105 retry=retry, 

2106 _target_object=new_blob, 

2107 ) 

2108 

2109 if not preserve_acl: 

2110 new_blob.acl.save(acl={}, client=client, timeout=timeout) 

2111 

2112 new_blob._set_properties(copy_result) 

2113 return new_blob 

2114 

2115 def rename_blob( 

2116 self, 

2117 blob, 

2118 new_name, 

2119 client=None, 

2120 if_generation_match=None, 

2121 if_generation_not_match=None, 

2122 if_metageneration_match=None, 

2123 if_metageneration_not_match=None, 

2124 if_source_generation_match=None, 

2125 if_source_generation_not_match=None, 

2126 if_source_metageneration_match=None, 

2127 if_source_metageneration_not_match=None, 

2128 timeout=_DEFAULT_TIMEOUT, 

2129 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, 

2130 ): 

2131 """Rename the given blob using copy and delete operations. 

2132 

2133 If :attr:`user_project` is set, bills the API request to that project. 

2134 

2135 Effectively, copies blob to the same bucket with a new name, then 

2136 deletes the blob. 

2137 

2138 .. warning:: 

2139 

2140 This method will first duplicate the data and then delete the 

2141 old blob. This means that with very large objects renaming 

2142 could be a very (temporarily) costly or a very slow operation. 

2143 If you need more control over the copy and deletion, instead 

2144 use ``google.cloud.storage.blob.Blob.copy_to`` and 

2145 ``google.cloud.storage.blob.Blob.delete`` directly. 

2146 

2147 Also note that this method is not fully supported in a 

2148 ``Batch`` context. 

2149 

2150 :type blob: :class:`google.cloud.storage.blob.Blob` 

2151 :param blob: The blob to be renamed. 

2152 

2153 :type new_name: str 

2154 :param new_name: The new name for this blob. 

2155 

2156 :type client: :class:`~google.cloud.storage.client.Client` or 

2157 ``NoneType`` 

2158 :param client: (Optional) The client to use. If not passed, falls back 

2159 to the ``client`` stored on the current bucket. 

2160 

2161 :type if_generation_match: long 

2162 :param if_generation_match: 

2163 (Optional) See :ref:`using-if-generation-match` 

2164 Note that the generation to be matched is that of the 

2165 ``destination`` blob. 

2166 

2167 :type if_generation_not_match: long 

2168 :param if_generation_not_match: 

2169 (Optional) See :ref:`using-if-generation-not-match` 

2170 Note that the generation to be matched is that of the 

2171 ``destination`` blob. 

2172 

2173 :type if_metageneration_match: long 

2174 :param if_metageneration_match: 

2175 (Optional) See :ref:`using-if-metageneration-match` 

2176 Note that the metageneration to be matched is that of the 

2177 ``destination`` blob. 

2178 

2179 :type if_metageneration_not_match: long 

2180 :param if_metageneration_not_match: 

2181 (Optional) See :ref:`using-if-metageneration-not-match` 

2182 Note that the metageneration to be matched is that of the 

2183 ``destination`` blob. 

2184 

2185 :type if_source_generation_match: long 

2186 :param if_source_generation_match: 

2187 (Optional) Makes the operation conditional on whether the source 

2188 object's generation matches the given value. Also used in the 

2189 (implied) delete request. 

2190 

2191 :type if_source_generation_not_match: long 

2192 :param if_source_generation_not_match: 

2193 (Optional) Makes the operation conditional on whether the source 

2194 object's generation does not match the given value. Also used in 

2195 the (implied) delete request. 

2196 

2197 :type if_source_metageneration_match: long 

2198 :param if_source_metageneration_match: 

2199 (Optional) Makes the operation conditional on whether the source 

2200 object's current metageneration matches the given value. Also used 

2201 in the (implied) delete request. 

2202 

2203 :type if_source_metageneration_not_match: long 

2204 :param if_source_metageneration_not_match: 

2205 (Optional) Makes the operation conditional on whether the source 

2206 object's current metageneration does not match the given value. 

2207 Also used in the (implied) delete request. 

2208 

2209 :type timeout: float or tuple 

2210 :param timeout: 

2211 (Optional) The amount of time, in seconds, to wait 

2212 for the server response. See: :ref:`configuring_timeouts` 

2213 

2214 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

2215 :param retry: 

2216 (Optional) How to retry the RPC. 

2217 The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, a conditional retry 

2218 policy which will only enable retries if ``if_generation_match`` or ``generation`` 

2219 is set, in order to ensure requests are idempotent before retrying them. 

2220 Change the value to ``DEFAULT_RETRY`` or another `google.api_core.retry.Retry` object 

2221 to enable retries regardless of generation precondition setting. 

2222 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout). 

2223 

2224 :rtype: :class:`Blob` 

2225 :returns: The newly-renamed blob. 

2226 """ 

2227 with create_trace_span(name="Storage.Bucket.renameBlob"): 

2228 same_name = blob.name == new_name 

2229 

2230 new_blob = self.copy_blob( 

2231 blob, 

2232 self, 

2233 new_name, 

2234 client=client, 

2235 timeout=timeout, 

2236 if_generation_match=if_generation_match, 

2237 if_generation_not_match=if_generation_not_match, 

2238 if_metageneration_match=if_metageneration_match, 

2239 if_metageneration_not_match=if_metageneration_not_match, 

2240 if_source_generation_match=if_source_generation_match, 

2241 if_source_generation_not_match=if_source_generation_not_match, 

2242 if_source_metageneration_match=if_source_metageneration_match, 

2243 if_source_metageneration_not_match=if_source_metageneration_not_match, 

2244 retry=retry, 

2245 ) 

2246 

2247 if not same_name: 

2248 blob.delete( 

2249 client=client, 

2250 timeout=timeout, 

2251 if_generation_match=if_source_generation_match, 

2252 if_generation_not_match=if_source_generation_not_match, 

2253 if_metageneration_match=if_source_metageneration_match, 

2254 if_metageneration_not_match=if_source_metageneration_not_match, 

2255 retry=retry, 

2256 ) 

2257 return new_blob 

2258 

2259 def move_blob( 

2260 self, 

2261 blob, 

2262 new_name, 

2263 client=None, 

2264 if_generation_match=None, 

2265 if_generation_not_match=None, 

2266 if_metageneration_match=None, 

2267 if_metageneration_not_match=None, 

2268 if_source_generation_match=None, 

2269 if_source_generation_not_match=None, 

2270 if_source_metageneration_match=None, 

2271 if_source_metageneration_not_match=None, 

2272 timeout=_DEFAULT_TIMEOUT, 

2273 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, 

2274 ): 

2275 """Move a blob to a new name atomically. 

2276 

2277 If :attr:`user_project` is set on the bucket, bills the API request to that project. 

2278 

2279 :type blob: :class:`google.cloud.storage.blob.Blob` 

2280 :param blob: The blob to be renamed. 

2281 

2282 :type new_name: str 

2283 :param new_name: The new name for this blob. 

2284 

2285 :type client: :class:`~google.cloud.storage.client.Client` or 

2286 ``NoneType`` 

2287 :param client: (Optional) The client to use. If not passed, falls back 

2288 to the ``client`` stored on the current bucket. 

2289 

2290 :type if_generation_match: int 

2291 :param if_generation_match: 

2292 (Optional) See :ref:`using-if-generation-match` 

2293 Note that the generation to be matched is that of the 

2294 ``destination`` blob. 

2295 

2296 :type if_generation_not_match: int 

2297 :param if_generation_not_match: 

2298 (Optional) See :ref:`using-if-generation-not-match` 

2299 Note that the generation to be matched is that of the 

2300 ``destination`` blob. 

2301 

2302 :type if_metageneration_match: int 

2303 :param if_metageneration_match: 

2304 (Optional) See :ref:`using-if-metageneration-match` 

2305 Note that the metageneration to be matched is that of the 

2306 ``destination`` blob. 

2307 

2308 :type if_metageneration_not_match: int 

2309 :param if_metageneration_not_match: 

2310 (Optional) See :ref:`using-if-metageneration-not-match` 

2311 Note that the metageneration to be matched is that of the 

2312 ``destination`` blob. 

2313 

2314 :type if_source_generation_match: int 

2315 :param if_source_generation_match: 

2316 (Optional) Makes the operation conditional on whether the source 

2317 object's generation matches the given value. 

2318 

2319 :type if_source_generation_not_match: int 

2320 :param if_source_generation_not_match: 

2321 (Optional) Makes the operation conditional on whether the source 

2322 object's generation does not match the given value. 

2323 

2324 :type if_source_metageneration_match: int 

2325 :param if_source_metageneration_match: 

2326 (Optional) Makes the operation conditional on whether the source 

2327 object's current metageneration matches the given value. 

2328 

2329 :type if_source_metageneration_not_match: int 

2330 :param if_source_metageneration_not_match: 

2331 (Optional) Makes the operation conditional on whether the source 

2332 object's current metageneration does not match the given value. 

2333 

2334 :type timeout: float or tuple 

2335 :param timeout: 

2336 (Optional) The amount of time, in seconds, to wait 

2337 for the server response. See: :ref:`configuring_timeouts` 

2338 

2339 :type retry: google.api_core.retry.Retry 

2340 :param retry: 

2341 (Optional) How to retry the RPC. 

2342 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout). 

2343 

2344 :rtype: :class:`Blob` 

2345 :returns: The newly-moved blob. 

2346 """ 

2347 with create_trace_span(name="Storage.Bucket.moveBlob"): 

2348 client = self._require_client(client) 

2349 query_params = {} 

2350 

2351 if self.user_project is not None: 

2352 query_params["userProject"] = self.user_project 

2353 

2354 _add_generation_match_parameters( 

2355 query_params, 

2356 if_generation_match=if_generation_match, 

2357 if_generation_not_match=if_generation_not_match, 

2358 if_metageneration_match=if_metageneration_match, 

2359 if_metageneration_not_match=if_metageneration_not_match, 

2360 if_source_generation_match=if_source_generation_match, 

2361 if_source_generation_not_match=if_source_generation_not_match, 

2362 if_source_metageneration_match=if_source_metageneration_match, 

2363 if_source_metageneration_not_match=if_source_metageneration_not_match, 

2364 ) 

2365 

2366 new_blob = Blob(bucket=self, name=new_name) 

2367 api_path = "{blob_path}/moveTo/o/{new_name}".format( 

2368 blob_path=blob.path, new_name=_quote(new_blob.name) 

2369 ) 

2370 

2371 move_result = client._post_resource( 

2372 api_path, 

2373 None, 

2374 query_params=query_params, 

2375 timeout=timeout, 

2376 retry=retry, 

2377 _target_object=new_blob, 

2378 ) 

2379 

2380 new_blob._set_properties(move_result) 

2381 return new_blob 

2382 

2383 def restore_blob( 

2384 self, 

2385 blob_name, 

2386 client=None, 

2387 generation=None, 

2388 copy_source_acl=None, 

2389 projection=None, 

2390 if_generation_match=None, 

2391 if_generation_not_match=None, 

2392 if_metageneration_match=None, 

2393 if_metageneration_not_match=None, 

2394 timeout=_DEFAULT_TIMEOUT, 

2395 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, 

2396 ): 

2397 """Restores a soft-deleted object. 

2398 

2399 If :attr:`user_project` is set on the bucket, bills the API request to that project. 

2400 

2401 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/objects/restore) 

2402 

2403 :type blob_name: str 

2404 :param blob_name: The name of the blob to be restored. 

2405 

2406 :type client: :class:`~google.cloud.storage.client.Client` 

2407 :param client: (Optional) The client to use. If not passed, falls back 

2408 to the ``client`` stored on the current bucket. 

2409 

2410 :type generation: int 

2411 :param generation: Selects the specific revision of the object. 

2412 

2413 :type copy_source_acl: bool 

2414 :param copy_source_acl: (Optional) If true, copy the soft-deleted object's access controls. 

2415 

2416 :type projection: str 

2417 :param projection: (Optional) Specifies the set of properties to return. 

2418 If used, must be 'full' or 'noAcl'. 

2419 

2420 :type if_generation_match: long 

2421 :param if_generation_match: 

2422 (Optional) See :ref:`using-if-generation-match` 

2423 

2424 :type if_generation_not_match: long 

2425 :param if_generation_not_match: 

2426 (Optional) See :ref:`using-if-generation-not-match` 

2427 

2428 :type if_metageneration_match: long 

2429 :param if_metageneration_match: 

2430 (Optional) See :ref:`using-if-metageneration-match` 

2431 

2432 :type if_metageneration_not_match: long 

2433 :param if_metageneration_not_match: 

2434 (Optional) See :ref:`using-if-metageneration-not-match` 

2435 

2436 :type timeout: float or tuple 

2437 :param timeout: 

2438 (Optional) The amount of time, in seconds, to wait 

2439 for the server response. See: :ref:`configuring_timeouts` 

2440 

2441 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

2442 :param retry: 

2443 (Optional) How to retry the RPC. 

2444 The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, which 

2445 only restore operations with ``if_generation_match`` or ``generation`` set 

2446 will be retried. 

2447 

2448 Users can configure non-default retry behavior. A ``None`` value will 

2449 disable retries. A ``DEFAULT_RETRY`` value will enable retries 

2450 even if restore operations are not guaranteed to be idempotent. 

2451 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout). 

2452 

2453 :rtype: :class:`google.cloud.storage.blob.Blob` 

2454 :returns: The restored Blob. 

2455 """ 

2456 with create_trace_span(name="Storage.Bucket.restore_blob"): 

2457 client = self._require_client(client) 

2458 query_params = {} 

2459 

2460 if self.user_project is not None: 

2461 query_params["userProject"] = self.user_project 

2462 if generation is not None: 

2463 query_params["generation"] = generation 

2464 if copy_source_acl is not None: 

2465 query_params["copySourceAcl"] = copy_source_acl 

2466 if projection is not None: 

2467 query_params["projection"] = projection 

2468 

2469 _add_generation_match_parameters( 

2470 query_params, 

2471 if_generation_match=if_generation_match, 

2472 if_generation_not_match=if_generation_not_match, 

2473 if_metageneration_match=if_metageneration_match, 

2474 if_metageneration_not_match=if_metageneration_not_match, 

2475 ) 

2476 

2477 blob = Blob(bucket=self, name=blob_name) 

2478 api_response = client._post_resource( 

2479 f"{blob.path}/restore", 

2480 None, 

2481 query_params=query_params, 

2482 timeout=timeout, 

2483 retry=retry, 

2484 ) 

2485 blob._set_properties(api_response) 

2486 return blob 

2487 

2488 @property 

2489 def cors(self): 

2490 """Retrieve or set CORS policies configured for this bucket. 

2491 

2492 See http://www.w3.org/TR/cors/ and 

2493 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2494 

2495 .. note:: 

2496 

2497 The getter for this property returns a list which contains 

2498 *copies* of the bucket's CORS policy mappings. Mutating the list 

2499 or one of its dicts has no effect unless you then re-assign the 

2500 dict via the setter. E.g.: 

2501 

2502 >>> policies = bucket.cors 

2503 >>> policies.append({'origin': '/foo', ...}) 

2504 >>> policies[1]['maxAgeSeconds'] = 3600 

2505 >>> del policies[0] 

2506 >>> bucket.cors = policies 

2507 >>> bucket.update() 

2508 

2509 :setter: Set CORS policies for this bucket. 

2510 :getter: Gets the CORS policies for this bucket. 

2511 

2512 :rtype: list of dictionaries 

2513 :returns: A sequence of mappings describing each CORS policy. 

2514 """ 

2515 return [copy.deepcopy(policy) for policy in self._properties.get("cors", ())] 

2516 

2517 @cors.setter 

2518 def cors(self, entries): 

2519 """Set CORS policies configured for this bucket. 

2520 

2521 See http://www.w3.org/TR/cors/ and 

2522 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2523 

2524 :type entries: list of dictionaries 

2525 :param entries: A sequence of mappings describing each CORS policy. 

2526 """ 

2527 self._patch_property("cors", entries) 

2528 

    default_event_based_hold = _scalar_property("defaultEventBasedHold")
    """Are uploaded objects automatically placed under an event-based hold?

    If True, uploaded objects will be placed under an event-based hold to
    be released at a future time. When released an object will then begin
    the retention period determined by the policy retention period for the
    object bucket.

    See https://cloud.google.com/storage/docs/json_api/v1/buckets

    If the property is not set locally, returns ``None``.

    :rtype: bool or ``NoneType``
    """

2543 

2544 @property 

2545 def encryption(self): 

2546 """Retrieve encryption configuration for this bucket. 

2547 

2548 :rtype: :class:`BucketEncryption` 

2549 :returns: an instance for managing the bucket's encryption configuration. 

2550 """ 

2551 info = self._properties.get("encryption", {}) 

2552 return BucketEncryption.from_api_repr(info, self) 

2553 

2554 @encryption.setter 

2555 def encryption(self, value): 

2556 """Set encryption configuration for this bucket. 

2557 

2558 :type value: :class:`BucketEncryption` or dict 

2559 :param value: The encryption configuration. 

2560 """ 

2561 self._patch_property("encryption", value) 

2562 

2563 @property 

2564 def default_kms_key_name(self): 

2565 """Retrieve / set default KMS encryption key for objects in the bucket. 

2566 

2567 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2568 

2569 :setter: Set default KMS encryption key for items in this bucket. 

2570 :getter: Get default KMS encryption key for items in this bucket. 

2571 

2572 :rtype: str 

2573 :returns: Default KMS encryption key, or ``None`` if not set. 

2574 """ 

2575 encryption_config = self._properties.get("encryption", {}) 

2576 return encryption_config.get("defaultKmsKeyName") 

2577 

2578 @default_kms_key_name.setter 

2579 def default_kms_key_name(self, value): 

2580 """Set default KMS encryption key for objects in the bucket. 

2581 

2582 :type value: str or None 

2583 :param value: new KMS key name (None to clear any existing key). 

2584 """ 

2585 encryption_config = self._properties.get("encryption", {}) 

2586 encryption_config["defaultKmsKeyName"] = value 

2587 self._patch_property("encryption", encryption_config) 

2588 

2589 @property 

2590 def labels(self): 

2591 """Retrieve or set labels assigned to this bucket. 

2592 

2593 See 

2594 https://cloud.google.com/storage/docs/json_api/v1/buckets#labels 

2595 

2596 .. note:: 

2597 

2598 The getter for this property returns a dict which is a *copy* 

2599 of the bucket's labels. Mutating that dict has no effect unless 

2600 you then re-assign the dict via the setter. E.g.: 

2601 

2602 >>> labels = bucket.labels 

2603 >>> labels['new_key'] = 'some-label' 

2604 >>> del labels['old_key'] 

2605 >>> bucket.labels = labels 

2606 >>> bucket.update() 

2607 

2608 :setter: Set labels for this bucket. 

2609 :getter: Gets the labels for this bucket. 

2610 

2611 :rtype: :class:`dict` 

2612 :returns: Name-value pairs (string->string) labelling the bucket. 

2613 """ 

2614 labels = self._properties.get("labels") 

2615 if labels is None: 

2616 return {} 

2617 return copy.deepcopy(labels) 

2618 

2619 @labels.setter 

2620 def labels(self, mapping): 

2621 """Set labels assigned to this bucket. 

2622 

2623 See 

2624 https://cloud.google.com/storage/docs/json_api/v1/buckets#labels 

2625 

2626 :type mapping: :class:`dict` 

2627 :param mapping: Name-value pairs (string->string) labelling the bucket. 

2628 """ 

2629 # If any labels have been expressly removed, we need to track this 

2630 # so that a future .patch() call can do the correct thing. 

2631 existing = set([k for k in self.labels.keys()]) 

2632 incoming = set([k for k in mapping.keys()]) 

2633 self._label_removals = self._label_removals.union(existing.difference(incoming)) 

2634 mapping = {k: str(v) for k, v in mapping.items()} 

2635 

2636 # Actually update the labels on the object. 

2637 self._patch_property("labels", copy.deepcopy(mapping)) 

2638 

2639 @property 

2640 def etag(self): 

2641 """Retrieve the ETag for the bucket. 

2642 

2643 See https://tools.ietf.org/html/rfc2616#section-3.11 and 

2644 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2645 

2646 :rtype: str or ``NoneType`` 

2647 :returns: The bucket etag or ``None`` if the bucket's 

2648 resource has not been loaded from the server. 

2649 """ 

2650 return self._properties.get("etag") 

2651 

2652 @property 

2653 def id(self): 

2654 """Retrieve the ID for the bucket. 

2655 

2656 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2657 

2658 :rtype: str or ``NoneType`` 

2659 :returns: The ID of the bucket or ``None`` if the bucket's 

2660 resource has not been loaded from the server. 

2661 """ 

2662 return self._properties.get("id") 

2663 

2664 @property 

2665 def iam_configuration(self): 

2666 """Retrieve IAM configuration for this bucket. 

2667 

2668 :rtype: :class:`IAMConfiguration` 

2669 :returns: an instance for managing the bucket's IAM configuration. 

2670 """ 

2671 info = self._properties.get("iamConfiguration", {}) 

2672 return IAMConfiguration.from_api_repr(info, self) 

2673 

2674 @property 

2675 def soft_delete_policy(self): 

2676 """Retrieve the soft delete policy for this bucket. 

2677 

2678 See https://cloud.google.com/storage/docs/soft-delete 

2679 

2680 :rtype: :class:`SoftDeletePolicy` 

2681 :returns: an instance for managing the bucket's soft delete policy. 

2682 """ 

2683 policy = self._properties.get("softDeletePolicy", {}) 

2684 return SoftDeletePolicy.from_api_repr(policy, self) 

2685 

2686 @property 

2687 def lifecycle_rules(self): 

2688 """Retrieve or set lifecycle rules configured for this bucket. 

2689 

2690 See https://cloud.google.com/storage/docs/lifecycle and 

2691 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2692 

2693 .. note:: 

2694 

2695 The getter for this property returns a generator which yields 

2696 *copies* of the bucket's lifecycle rules mappings. Mutating the 

2697 output dicts has no effect unless you then re-assign the dict via 

2698 the setter. E.g.: 

2699 

2700 >>> rules = list(bucket.lifecycle_rules) 

2701 >>> rules.append({'origin': '/foo', ...}) 

2702 >>> rules[1]['rule']['action']['type'] = 'Delete' 

2703 >>> del rules[0] 

2704 >>> bucket.lifecycle_rules = rules 

2705 >>> bucket.update() 

2706 

2707 :setter: Set lifecycle rules for this bucket. 

2708 :getter: Gets the lifecycle rules for this bucket. 

2709 

2710 :rtype: generator(dict) 

2711 :returns: A sequence of mappings describing each lifecycle rule. 

2712 """ 

2713 info = self._properties.get("lifecycle", {}) 

2714 for rule in info.get("rule", ()): 

2715 action_type = rule["action"]["type"] 

2716 if action_type == "Delete": 

2717 yield LifecycleRuleDelete.from_api_repr(rule) 

2718 elif action_type == "SetStorageClass": 

2719 yield LifecycleRuleSetStorageClass.from_api_repr(rule) 

2720 elif action_type == "AbortIncompleteMultipartUpload": 

2721 yield LifecycleRuleAbortIncompleteMultipartUpload.from_api_repr(rule) 

2722 else: 

2723 warnings.warn( 

2724 "Unknown lifecycle rule type received: {}. Please upgrade to the latest version of google-cloud-storage.".format( 

2725 rule 

2726 ), 

2727 UserWarning, 

2728 stacklevel=1, 

2729 ) 

2730 

2731 @lifecycle_rules.setter 

2732 def lifecycle_rules(self, rules): 

2733 """Set lifecycle rules configured for this bucket. 

2734 

2735 See https://cloud.google.com/storage/docs/lifecycle and 

2736 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2737 

2738 :type rules: list of dictionaries 

2739 :param rules: A sequence of mappings describing each lifecycle rule. 

2740 """ 

2741 rules = [dict(rule) for rule in rules] # Convert helpers if needed 

2742 self._patch_property("lifecycle", {"rule": rules}) 

2743 

2744 def clear_lifecycle_rules(self): 

2745 """Clear lifecycle rules configured for this bucket. 

2746 

2747 See https://cloud.google.com/storage/docs/lifecycle and 

2748 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2749 """ 

2750 self.lifecycle_rules = [] 

2751 

    def clear_lifecyle_rules(self):
        """Deprecated alias for :meth:`clear_lifecycle_rules`.

        Kept (typo and all) for backward compatibility; prefer
        :meth:`clear_lifecycle_rules` in new code.
        """
        return self.clear_lifecycle_rules()

2755 

2756 def add_lifecycle_delete_rule(self, **kw): 

2757 """Add a "delete" rule to lifecycle rules configured for this bucket. 

2758 

2759 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle), 

2760 which is set on the bucket. For the general format of a lifecycle configuration, see the 

2761 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets). 

2762 See also a [code sample](https://cloud.google.com/storage/docs/samples/storage-enable-bucket-lifecycle-management#storage_enable_bucket_lifecycle_management-python). 

2763 

2764 :type kw: dict 

2765 :params kw: arguments passed to :class:`LifecycleRuleConditions`. 

2766 """ 

2767 rules = list(self.lifecycle_rules) 

2768 rules.append(LifecycleRuleDelete(**kw)) 

2769 self.lifecycle_rules = rules 

2770 

2771 def add_lifecycle_set_storage_class_rule(self, storage_class, **kw): 

2772 """Add a "set storage class" rule to lifecycle rules. 

2773 

2774 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle), 

2775 which is set on the bucket. For the general format of a lifecycle configuration, see the 

2776 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets). 

2777 

2778 :type storage_class: str, one of :attr:`STORAGE_CLASSES`. 

2779 :param storage_class: new storage class to assign to matching items. 

2780 

2781 :type kw: dict 

2782 :params kw: arguments passed to :class:`LifecycleRuleConditions`. 

2783 """ 

2784 rules = list(self.lifecycle_rules) 

2785 rules.append(LifecycleRuleSetStorageClass(storage_class, **kw)) 

2786 self.lifecycle_rules = rules 

2787 

2788 def add_lifecycle_abort_incomplete_multipart_upload_rule(self, **kw): 

2789 """Add a "abort incomplete multipart upload" rule to lifecycle rules. 

2790 

2791 .. note:: 

2792 The "age" lifecycle condition is the only supported condition 

2793 for this rule. 

2794 

2795 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle), 

2796 which is set on the bucket. For the general format of a lifecycle configuration, see the 

2797 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets). 

2798 

2799 :type kw: dict 

2800 :params kw: arguments passed to :class:`LifecycleRuleConditions`. 

2801 """ 

2802 rules = list(self.lifecycle_rules) 

2803 rules.append(LifecycleRuleAbortIncompleteMultipartUpload(**kw)) 

2804 self.lifecycle_rules = rules 

2805 

    # Raw accessor for the bucket's location; exposed through the
    # ``location`` property below (whose setter is deprecated).
    _location = _scalar_property("location")

2807 

    @property
    def location(self):
        """Retrieve location configured for this bucket.

        See https://cloud.google.com/storage/docs/json_api/v1/buckets and
        https://cloud.google.com/storage/docs/locations

        Returns ``None`` if the property has not been set before creation,
        or if the bucket's resource has not been loaded from the server.

        :rtype: str or ``NoneType``
        """
        # Delegates to the ``_location`` scalar-property descriptor.
        return self._location

2820 

    @location.setter
    def location(self, value):
        """(Deprecated) Set `Bucket.location`

        This can only be set at bucket **creation** time.

        See https://cloud.google.com/storage/docs/json_api/v1/buckets and
        https://cloud.google.com/storage/docs/bucket-locations

        .. warning::

            Assignment to 'Bucket.location' is deprecated, as it is only
            valid before the bucket is created. Instead, pass the location
            to `Bucket.create`.
        """
        # Warn at the caller's frame (stacklevel=2), then record the value
        # anyway for use at creation time.
        warnings.warn(_LOCATION_SETTER_MESSAGE, DeprecationWarning, stacklevel=2)
        self._location = value

2838 

2839 @property 

2840 def data_locations(self): 

2841 """Retrieve the list of regional locations for custom dual-region buckets. 

2842 

2843 See https://cloud.google.com/storage/docs/json_api/v1/buckets and 

2844 https://cloud.google.com/storage/docs/locations 

2845 

2846 Returns ``None`` if the property has not been set before creation, 

2847 if the bucket's resource has not been loaded from the server, 

2848 or if the bucket is not a dual-regions bucket. 

2849 :rtype: list of str or ``NoneType`` 

2850 """ 

2851 custom_placement_config = self._properties.get("customPlacementConfig", {}) 

2852 return custom_placement_config.get("dataLocations") 

2853 

2854 @property 

2855 def location_type(self): 

2856 """Retrieve the location type for the bucket. 

2857 

2858 See https://cloud.google.com/storage/docs/storage-classes 

2859 

2860 :getter: Gets the the location type for this bucket. 

2861 

2862 :rtype: str or ``NoneType`` 

2863 :returns: 

2864 If set, one of 

2865 :attr:`~google.cloud.storage.constants.MULTI_REGION_LOCATION_TYPE`, 

2866 :attr:`~google.cloud.storage.constants.REGION_LOCATION_TYPE`, or 

2867 :attr:`~google.cloud.storage.constants.DUAL_REGION_LOCATION_TYPE`, 

2868 else ``None``. 

2869 """ 

2870 return self._properties.get("locationType") 

2871 

2872 def get_logging(self): 

2873 """Return info about access logging for this bucket. 

2874 

2875 See https://cloud.google.com/storage/docs/access-logs#status 

2876 

2877 :rtype: dict or None 

2878 :returns: a dict w/ keys, ``logBucket`` and ``logObjectPrefix`` 

2879 (if logging is enabled), or None (if not). 

2880 """ 

2881 info = self._properties.get("logging") 

2882 return copy.deepcopy(info) 

2883 

2884 def enable_logging(self, bucket_name, object_prefix=""): 

2885 """Enable access logging for this bucket. 

2886 

2887 See https://cloud.google.com/storage/docs/access-logs 

2888 

2889 :type bucket_name: str 

2890 :param bucket_name: name of bucket in which to store access logs 

2891 

2892 :type object_prefix: str 

2893 :param object_prefix: prefix for access log filenames 

2894 """ 

2895 info = {"logBucket": bucket_name, "logObjectPrefix": object_prefix} 

2896 self._patch_property("logging", info) 

2897 

2898 def disable_logging(self): 

2899 """Disable access logging for this bucket. 

2900 

2901 See https://cloud.google.com/storage/docs/access-logs#disabling 

2902 """ 

2903 self._patch_property("logging", None) 

2904 

2905 @property 

2906 def metageneration(self): 

2907 """Retrieve the metageneration for the bucket. 

2908 

2909 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2910 

2911 :rtype: int or ``NoneType`` 

2912 :returns: The metageneration of the bucket or ``None`` if the bucket's 

2913 resource has not been loaded from the server. 

2914 """ 

2915 metageneration = self._properties.get("metageneration") 

2916 if metageneration is not None: 

2917 return int(metageneration) 

2918 

2919 @property 

2920 def owner(self): 

2921 """Retrieve info about the owner of the bucket. 

2922 

2923 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2924 

2925 :rtype: dict or ``NoneType`` 

2926 :returns: Mapping of owner's role/ID. Returns ``None`` if the bucket's 

2927 resource has not been loaded from the server. 

2928 """ 

2929 return copy.deepcopy(self._properties.get("owner")) 

2930 

2931 @property 

2932 def project_number(self): 

2933 """Retrieve the number of the project to which the bucket is assigned. 

2934 

2935 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2936 

2937 :rtype: int or ``NoneType`` 

2938 :returns: The project number that owns the bucket or ``None`` if 

2939 the bucket's resource has not been loaded from the server. 

2940 """ 

2941 project_number = self._properties.get("projectNumber") 

2942 if project_number is not None: 

2943 return int(project_number) 

2944 

2945 @property 

2946 def retention_policy_effective_time(self): 

2947 """Retrieve the effective time of the bucket's retention policy. 

2948 

2949 :rtype: datetime.datetime or ``NoneType`` 

2950 :returns: point-in time at which the bucket's retention policy is 

2951 effective, or ``None`` if the property is not 

2952 set locally. 

2953 """ 

2954 policy = self._properties.get("retentionPolicy") 

2955 if policy is not None: 

2956 timestamp = policy.get("effectiveTime") 

2957 if timestamp is not None: 

2958 return _rfc3339_nanos_to_datetime(timestamp) 

2959 

2960 @property 

2961 def retention_policy_locked(self): 

2962 """Retrieve whthere the bucket's retention policy is locked. 

2963 

2964 :rtype: bool 

2965 :returns: True if the bucket's policy is locked, or else False 

2966 if the policy is not locked, or the property is not 

2967 set locally. 

2968 """ 

2969 policy = self._properties.get("retentionPolicy") 

2970 if policy is not None: 

2971 return policy.get("isLocked") 

2972 

2973 @property 

2974 def retention_period(self): 

2975 """Retrieve or set the retention period for items in the bucket. 

2976 

2977 :rtype: int or ``NoneType`` 

2978 :returns: number of seconds to retain items after upload or release 

2979 from event-based lock, or ``None`` if the property is not 

2980 set locally. 

2981 """ 

2982 policy = self._properties.get("retentionPolicy") 

2983 if policy is not None: 

2984 period = policy.get("retentionPeriod") 

2985 if period is not None: 

2986 return int(period) 

2987 

2988 @retention_period.setter 

2989 def retention_period(self, value): 

2990 """Set the retention period for items in the bucket. 

2991 

2992 :type value: int 

2993 :param value: 

2994 number of seconds to retain items after upload or release from 

2995 event-based lock. 

2996 

2997 :raises ValueError: if the bucket's retention policy is locked. 

2998 """ 

2999 policy = self._properties.setdefault("retentionPolicy", {}) 

3000 if value is not None: 

3001 policy["retentionPeriod"] = str(value) 

3002 else: 

3003 policy = None 

3004 self._patch_property("retentionPolicy", policy) 

3005 

3006 @property 

3007 def self_link(self): 

3008 """Retrieve the URI for the bucket. 

3009 

3010 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

3011 

3012 :rtype: str or ``NoneType`` 

3013 :returns: The self link for the bucket or ``None`` if 

3014 the bucket's resource has not been loaded from the server. 

3015 """ 

3016 return self._properties.get("selfLink") 

3017 

3018 @property 

3019 def storage_class(self): 

3020 """Retrieve or set the storage class for the bucket. 

3021 

3022 See https://cloud.google.com/storage/docs/storage-classes 

3023 

3024 :setter: Set the storage class for this bucket. 

3025 :getter: Gets the the storage class for this bucket. 

3026 

3027 :rtype: str or ``NoneType`` 

3028 :returns: 

3029 If set, one of 

3030 :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`, 

3031 :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`, 

3032 :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`, 

3033 :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`, 

3034 :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`, 

3035 :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`, 

3036 or 

3037 :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`, 

3038 else ``None``. 

3039 """ 

3040 return self._properties.get("storageClass") 

3041 

3042 @storage_class.setter 

3043 def storage_class(self, value): 

3044 """Set the storage class for the bucket. 

3045 

3046 See https://cloud.google.com/storage/docs/storage-classes 

3047 

3048 :type value: str 

3049 :param value: 

3050 One of 

3051 :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`, 

3052 :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`, 

3053 :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`, 

3054 :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`, 

3055 :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`, 

3056 :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`, 

3057 or 

3058 :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`, 

3059 """ 

3060 self._patch_property("storageClass", value) 

3061 

3062 @property 

3063 def time_created(self): 

3064 """Retrieve the timestamp at which the bucket was created. 

3065 

3066 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

3067 

3068 :rtype: :class:`datetime.datetime` or ``NoneType`` 

3069 :returns: Datetime object parsed from RFC3339 valid timestamp, or 

3070 ``None`` if the bucket's resource has not been loaded 

3071 from the server. 

3072 """ 

3073 value = self._properties.get("timeCreated") 

3074 if value is not None: 

3075 return _rfc3339_nanos_to_datetime(value) 

3076 

3077 @property 

3078 def updated(self): 

3079 """Retrieve the timestamp at which the bucket was last updated. 

3080 

3081 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

3082 

3083 :rtype: :class:`datetime.datetime` or ``NoneType`` 

3084 :returns: Datetime object parsed from RFC3339 valid timestamp, or 

3085 ``None`` if the bucket's resource has not been loaded 

3086 from the server. 

3087 """ 

3088 value = self._properties.get("updated") 

3089 if value is not None: 

3090 return _rfc3339_nanos_to_datetime(value) 

3091 

3092 @property 

3093 def versioning_enabled(self): 

3094 """Is versioning enabled for this bucket? 

3095 

3096 See https://cloud.google.com/storage/docs/object-versioning for 

3097 details. 

3098 

3099 :setter: Update whether versioning is enabled for this bucket. 

3100 :getter: Query whether versioning is enabled for this bucket. 

3101 

3102 :rtype: bool 

3103 :returns: True if enabled, else False. 

3104 """ 

3105 versioning = self._properties.get("versioning", {}) 

3106 return versioning.get("enabled", False) 

3107 

3108 @versioning_enabled.setter 

3109 def versioning_enabled(self, value): 

3110 """Enable versioning for this bucket. 

3111 

3112 See https://cloud.google.com/storage/docs/object-versioning for 

3113 details. 

3114 

3115 :type value: convertible to boolean 

3116 :param value: should versioning be enabled for the bucket? 

3117 """ 

3118 self._patch_property("versioning", {"enabled": bool(value)}) 

3119 

3120 @property 

3121 def requester_pays(self): 

3122 """Does the requester pay for API requests for this bucket? 

3123 

3124 See https://cloud.google.com/storage/docs/requester-pays for 

3125 details. 

3126 

3127 :setter: Update whether requester pays for this bucket. 

3128 :getter: Query whether requester pays for this bucket. 

3129 

3130 :rtype: bool 

3131 :returns: True if requester pays for API requests for the bucket, 

3132 else False. 

3133 """ 

3134 versioning = self._properties.get("billing", {}) 

3135 return versioning.get("requesterPays", False) 

3136 

3137 @requester_pays.setter 

3138 def requester_pays(self, value): 

3139 """Update whether requester pays for API requests for this bucket. 

3140 

3141 See https://cloud.google.com/storage/docs/using-requester-pays for 

3142 details. 

3143 

3144 :type value: convertible to boolean 

3145 :param value: should requester pay for API requests for the bucket? 

3146 """ 

3147 self._patch_property("billing", {"requesterPays": bool(value)}) 

3148 

3149 @property 

3150 def autoclass_enabled(self): 

3151 """Whether Autoclass is enabled for this bucket. 

3152 

3153 See https://cloud.google.com/storage/docs/using-autoclass for details. 

3154 

3155 :setter: Update whether autoclass is enabled for this bucket. 

3156 :getter: Query whether autoclass is enabled for this bucket. 

3157 

3158 :rtype: bool 

3159 :returns: True if enabled, else False. 

3160 """ 

3161 autoclass = self._properties.get("autoclass", {}) 

3162 return autoclass.get("enabled", False) 

3163 

3164 @autoclass_enabled.setter 

3165 def autoclass_enabled(self, value): 

3166 """Enable or disable Autoclass at the bucket-level. 

3167 

3168 See https://cloud.google.com/storage/docs/using-autoclass for details. 

3169 

3170 :type value: convertible to boolean 

3171 :param value: If true, enable Autoclass for this bucket. 

3172 If false, disable Autoclass for this bucket. 

3173 """ 

3174 autoclass = self._properties.get("autoclass", {}) 

3175 autoclass["enabled"] = bool(value) 

3176 self._patch_property("autoclass", autoclass) 

3177 

    @property
    def autoclass_toggle_time(self):
        """Retrieve the toggle time when Autoclass was last enabled or disabled for the bucket.

        :rtype: datetime.datetime or ``NoneType``
        :returns: point-in time at which the bucket's autoclass is toggled, or ``None`` if the property is not set locally.
        """
        autoclass = self._properties.get("autoclass")
        if autoclass is not None:
            timestamp = autoclass.get("toggleTime")
            if timestamp is not None:
                return _rfc3339_nanos_to_datetime(timestamp)

3189 

3190 @property 

3191 def autoclass_terminal_storage_class(self): 

3192 """The storage class that objects in an Autoclass bucket eventually transition to if 

3193 they are not read for a certain length of time. Valid values are NEARLINE and ARCHIVE. 

3194 

3195 See https://cloud.google.com/storage/docs/using-autoclass for details. 

3196 

3197 :setter: Set the terminal storage class for Autoclass configuration. 

3198 :getter: Get the terminal storage class for Autoclass configuration. 

3199 

3200 :rtype: str 

3201 :returns: The terminal storage class if Autoclass is enabled, else ``None``. 

3202 """ 

3203 autoclass = self._properties.get("autoclass", {}) 

3204 return autoclass.get("terminalStorageClass", None) 

3205 

3206 @autoclass_terminal_storage_class.setter 

3207 def autoclass_terminal_storage_class(self, value): 

3208 """The storage class that objects in an Autoclass bucket eventually transition to if 

3209 they are not read for a certain length of time. Valid values are NEARLINE and ARCHIVE. 

3210 

3211 See https://cloud.google.com/storage/docs/using-autoclass for details. 

3212 

3213 :type value: str 

3214 :param value: The only valid values are `"NEARLINE"` and `"ARCHIVE"`. 

3215 """ 

3216 autoclass = self._properties.get("autoclass", {}) 

3217 autoclass["terminalStorageClass"] = value 

3218 self._patch_property("autoclass", autoclass) 

3219 

3220 @property 

3221 def autoclass_terminal_storage_class_update_time(self): 

3222 """The time at which the Autoclass terminal_storage_class field was last updated for this bucket 

3223 :rtype: datetime.datetime or ``NoneType`` 

3224 :returns: point-in time at which the bucket's terminal_storage_class is last updated, or ``None`` if the property is not set locally. 

3225 """ 

3226 autoclass = self._properties.get("autoclass") 

3227 if autoclass is not None: 

3228 timestamp = autoclass.get("terminalStorageClassUpdateTime") 

3229 if timestamp is not None: 

3230 return _rfc3339_nanos_to_datetime(timestamp) 

3231 

3232 @property 

3233 def object_retention_mode(self): 

3234 """Retrieve the object retention mode set on the bucket. 

3235 

3236 :rtype: str 

3237 :returns: When set to Enabled, retention configurations can be 

3238 set on objects in the bucket. 

3239 """ 

3240 object_retention = self._properties.get("objectRetention") 

3241 if object_retention is not None: 

3242 return object_retention.get("mode") 

3243 

3244 @property 

3245 def hierarchical_namespace_enabled(self): 

3246 """Whether hierarchical namespace is enabled for this bucket. 

3247 

3248 :setter: Update whether hierarchical namespace is enabled for this bucket. 

3249 :getter: Query whether hierarchical namespace is enabled for this bucket. 

3250 

3251 :rtype: bool 

3252 :returns: True if enabled, else False. 

3253 """ 

3254 hns = self._properties.get("hierarchicalNamespace", {}) 

3255 return hns.get("enabled") 

3256 

3257 @hierarchical_namespace_enabled.setter 

3258 def hierarchical_namespace_enabled(self, value): 

3259 """Enable or disable hierarchical namespace at the bucket-level. 

3260 

3261 :type value: convertible to boolean 

3262 :param value: If true, enable hierarchical namespace for this bucket. 

3263 If false, disable hierarchical namespace for this bucket. 

3264 

3265 .. note:: 

3266 To enable hierarchical namespace, you must set it at bucket creation time. 

3267 Currently, hierarchical namespace configuration cannot be changed after bucket creation. 

3268 """ 

3269 hns = self._properties.get("hierarchicalNamespace", {}) 

3270 hns["enabled"] = bool(value) 

3271 self._patch_property("hierarchicalNamespace", hns) 

3272 

3273 def configure_website(self, main_page_suffix=None, not_found_page=None): 

3274 """Configure website-related properties. 

3275 

3276 See https://cloud.google.com/storage/docs/static-website 

3277 

3278 .. note:: 

3279 This configures the bucket's website-related properties,controlling how 

3280 the service behaves when accessing bucket contents as a web site. 

3281 See [tutorials](https://cloud.google.com/storage/docs/hosting-static-website) and 

3282 [code samples](https://cloud.google.com/storage/docs/samples/storage-define-bucket-website-configuration#storage_define_bucket_website_configuration-python) 

3283 for more information. 

3284 

3285 :type main_page_suffix: str 

3286 :param main_page_suffix: The page to use as the main page 

3287 of a directory. 

3288 Typically something like index.html. 

3289 

3290 :type not_found_page: str 

3291 :param not_found_page: The file to use when a page isn't found. 

3292 """ 

3293 data = { 

3294 "mainPageSuffix": main_page_suffix, 

3295 "notFoundPage": not_found_page, 

3296 } 

3297 self._patch_property("website", data) 

3298 

3299 def disable_website(self): 

3300 """Disable the website configuration for this bucket. 

3301 

3302 This is really just a shortcut for setting the website-related 

3303 attributes to ``None``. 

3304 """ 

3305 return self.configure_website(None, None) 

3306 

3307 def get_iam_policy( 

3308 self, 

3309 client=None, 

3310 requested_policy_version=None, 

3311 timeout=_DEFAULT_TIMEOUT, 

3312 retry=DEFAULT_RETRY, 

3313 ): 

3314 """Retrieve the IAM policy for the bucket. 

3315 

3316 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy) 

3317 and a [code sample](https://cloud.google.com/storage/docs/samples/storage-view-bucket-iam-members#storage_view_bucket_iam_members-python). 

3318 

3319 If :attr:`user_project` is set, bills the API request to that project. 

3320 

3321 :type client: :class:`~google.cloud.storage.client.Client` or 

3322 ``NoneType`` 

3323 :param client: (Optional) The client to use. If not passed, falls back 

3324 to the ``client`` stored on the current bucket. 

3325 

3326 :type requested_policy_version: int or ``NoneType`` 

3327 :param requested_policy_version: (Optional) The version of IAM policies to request. 

3328 If a policy with a condition is requested without 

3329 setting this, the server will return an error. 

3330 This must be set to a value of 3 to retrieve IAM 

3331 policies containing conditions. This is to prevent 

3332 client code that isn't aware of IAM conditions from 

3333 interpreting and modifying policies incorrectly. 

3334 The service might return a policy with version lower 

3335 than the one that was requested, based on the 

3336 feature syntax in the policy fetched. 

3337 

3338 :type timeout: float or tuple 

3339 :param timeout: 

3340 (Optional) The amount of time, in seconds, to wait 

3341 for the server response. See: :ref:`configuring_timeouts` 

3342 

3343 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

3344 :param retry: 

3345 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

3346 

3347 :rtype: :class:`google.api_core.iam.Policy` 

3348 :returns: the policy instance, based on the resource returned from 

3349 the ``getIamPolicy`` API request. 

3350 """ 

3351 with create_trace_span(name="Storage.Bucket.getIamPolicy"): 

3352 client = self._require_client(client) 

3353 query_params = {} 

3354 

3355 if self.user_project is not None: 

3356 query_params["userProject"] = self.user_project 

3357 

3358 if requested_policy_version is not None: 

3359 query_params["optionsRequestedPolicyVersion"] = requested_policy_version 

3360 

3361 info = client._get_resource( 

3362 f"{self.path}/iam", 

3363 query_params=query_params, 

3364 timeout=timeout, 

3365 retry=retry, 

3366 _target_object=None, 

3367 ) 

3368 return Policy.from_api_repr(info) 

3369 

3370 def set_iam_policy( 

3371 self, 

3372 policy, 

3373 client=None, 

3374 timeout=_DEFAULT_TIMEOUT, 

3375 retry=DEFAULT_RETRY_IF_ETAG_IN_JSON, 

3376 ): 

3377 """Update the IAM policy for the bucket. 

3378 

3379 See 

3380 https://cloud.google.com/storage/docs/json_api/v1/buckets/setIamPolicy 

3381 

3382 If :attr:`user_project` is set, bills the API request to that project. 

3383 

3384 :type policy: :class:`google.api_core.iam.Policy` 

3385 :param policy: policy instance used to update bucket's IAM policy. 

3386 

3387 :type client: :class:`~google.cloud.storage.client.Client` or 

3388 ``NoneType`` 

3389 :param client: (Optional) The client to use. If not passed, falls back 

3390 to the ``client`` stored on the current bucket. 

3391 

3392 :type timeout: float or tuple 

3393 :param timeout: 

3394 (Optional) The amount of time, in seconds, to wait 

3395 for the server response. See: :ref:`configuring_timeouts` 

3396 

3397 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

3398 :param retry: 

3399 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

3400 

3401 :rtype: :class:`google.api_core.iam.Policy` 

3402 :returns: the policy instance, based on the resource returned from 

3403 the ``setIamPolicy`` API request. 

3404 """ 

3405 with create_trace_span(name="Storage.Bucket.setIamPolicy"): 

3406 client = self._require_client(client) 

3407 query_params = {} 

3408 

3409 if self.user_project is not None: 

3410 query_params["userProject"] = self.user_project 

3411 

3412 path = f"{self.path}/iam" 

3413 resource = policy.to_api_repr() 

3414 resource["resourceId"] = self.path 

3415 

3416 info = client._put_resource( 

3417 path, 

3418 resource, 

3419 query_params=query_params, 

3420 timeout=timeout, 

3421 retry=retry, 

3422 _target_object=None, 

3423 ) 

3424 

3425 return Policy.from_api_repr(info) 

3426 

3427 def test_iam_permissions( 

3428 self, 

3429 permissions, 

3430 client=None, 

3431 timeout=_DEFAULT_TIMEOUT, 

3432 retry=DEFAULT_RETRY, 

3433 ): 

3434 """API call: test permissions 

3435 

3436 See 

3437 https://cloud.google.com/storage/docs/json_api/v1/buckets/testIamPermissions 

3438 

3439 If :attr:`user_project` is set, bills the API request to that project. 

3440 

3441 :type permissions: list of string 

3442 :param permissions: the permissions to check 

3443 

3444 :type client: :class:`~google.cloud.storage.client.Client` or 

3445 ``NoneType`` 

3446 :param client: (Optional) The client to use. If not passed, falls back 

3447 to the ``client`` stored on the current bucket. 

3448 

3449 :type timeout: float or tuple 

3450 :param timeout: 

3451 (Optional) The amount of time, in seconds, to wait 

3452 for the server response. See: :ref:`configuring_timeouts` 

3453 

3454 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

3455 :param retry: 

3456 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

3457 

3458 :rtype: list of string 

3459 :returns: the permissions returned by the ``testIamPermissions`` API 

3460 request. 

3461 """ 

3462 with create_trace_span(name="Storage.Bucket.testIamPermissions"): 

3463 client = self._require_client(client) 

3464 query_params = {"permissions": permissions} 

3465 

3466 if self.user_project is not None: 

3467 query_params["userProject"] = self.user_project 

3468 

3469 path = f"{self.path}/iam/testPermissions" 

3470 resp = client._get_resource( 

3471 path, 

3472 query_params=query_params, 

3473 timeout=timeout, 

3474 retry=retry, 

3475 _target_object=None, 

3476 ) 

3477 return resp.get("permissions", []) 

3478 

    def make_public(
        self,
        recursive=False,
        future=False,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
    ):
        """Update bucket's ACL, granting read access to anonymous users.

        :type recursive: bool
        :param recursive: If True, this will make all blobs inside the bucket
                          public as well.

        :type future: bool
        :param future: If True, this will make all objects created in the
                       future public as well.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use.  If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        blob's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            blob's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :raises ValueError:
            If ``recursive`` is True, and the bucket contains more than 256
            blobs.  This is to prevent extremely long runtime of this
            method.  For such buckets, iterate over the blobs returned by
            :meth:`list_blobs` and call
            :meth:`~google.cloud.storage.blob.Blob.make_public`
            for each blob.
        """
        with create_trace_span(name="Storage.Bucket.makePublic"):
            # Always update the bucket-level ACL first, granting READ to the
            # special "all users" (anonymous) entity.
            self.acl.all().grant_read()
            self.acl.save(
                client=client,
                timeout=timeout,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                retry=retry,
            )

            if future:
                # Grant READ to anonymous users on the *default object ACL*,
                # so objects created later inherit public access.
                doa = self.default_object_acl
                if not doa.loaded:
                    doa.reload(client=client, timeout=timeout)
                doa.all().grant_read()
                doa.save(
                    client=client,
                    timeout=timeout,
                    if_metageneration_match=if_metageneration_match,
                    if_metageneration_not_match=if_metageneration_not_match,
                    retry=retry,
                )

            if recursive:
                # Fetch one more than the cap so we can detect overflow and
                # refuse rather than silently processing a partial listing.
                blobs = list(
                    self.list_blobs(
                        projection="full",
                        max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
                        client=client,
                        timeout=timeout,
                    )
                )
                if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
                    message = (
                        "Refusing to make public recursively with more than "
                        "%d objects. If you actually want to make every object "
                        "in this bucket public, iterate through the blobs "
                        "returned by 'Bucket.list_blobs()' and call "
                        "'make_public' on each one."
                    ) % (self._MAX_OBJECTS_FOR_ITERATION,)
                    raise ValueError(message)

                # NOTE: per-blob saves do not forward the metageneration
                # preconditions or ``retry`` — only client and timeout.
                for blob in blobs:
                    blob.acl.all().grant_read()
                    blob.acl.save(
                        client=client,
                        timeout=timeout,
                    )

3576 

    def make_private(
        self,
        recursive=False,
        future=False,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
    ):
        """Update bucket's ACL, revoking read access for anonymous users.

        :type recursive: bool
        :param recursive: If True, this will make all blobs inside the bucket
                          private as well.

        :type future: bool
        :param future: If True, this will make all objects created in the
                       future private as well.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use.  If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        blob's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            blob's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :raises ValueError:
            If ``recursive`` is True, and the bucket contains more than 256
            blobs.  This is to prevent extremely long runtime of this
            method.  For such buckets, iterate over the blobs returned by
            :meth:`list_blobs` and call
            :meth:`~google.cloud.storage.blob.Blob.make_private`
            for each blob.
        """
        with create_trace_span(name="Storage.Bucket.makePrivate"):
            # Mirror image of make_public: revoke READ for the anonymous
            # ("all users") entity on the bucket-level ACL first.
            self.acl.all().revoke_read()
            self.acl.save(
                client=client,
                timeout=timeout,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                retry=retry,
            )

            if future:
                # Revoke anonymous READ on the *default object ACL*, so
                # objects created later are private by default.
                doa = self.default_object_acl
                if not doa.loaded:
                    doa.reload(client=client, timeout=timeout)
                doa.all().revoke_read()
                doa.save(
                    client=client,
                    timeout=timeout,
                    if_metageneration_match=if_metageneration_match,
                    if_metageneration_not_match=if_metageneration_not_match,
                    retry=retry,
                )

            if recursive:
                # Fetch one more than the cap so overflow can be detected and
                # the operation refused instead of silently truncated.
                blobs = list(
                    self.list_blobs(
                        projection="full",
                        max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
                        client=client,
                        timeout=timeout,
                    )
                )
                if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
                    message = (
                        "Refusing to make private recursively with more than "
                        "%d objects. If you actually want to make every object "
                        "in this bucket private, iterate through the blobs "
                        "returned by 'Bucket.list_blobs()' and call "
                        "'make_private' on each one."
                    ) % (self._MAX_OBJECTS_FOR_ITERATION,)
                    raise ValueError(message)

                # NOTE: per-blob saves do not forward the metageneration
                # preconditions or ``retry`` — only client and timeout.
                for blob in blobs:
                    blob.acl.all().revoke_read()
                    blob.acl.save(client=client, timeout=timeout)

3670 

3671 def generate_upload_policy(self, conditions, expiration=None, client=None): 

3672 """Create a signed upload policy for uploading objects. 

3673 

3674 This method generates and signs a policy document. You can use 

3675 [`policy documents`](https://cloud.google.com/storage/docs/xml-api/post-object-forms) 

3676 to allow visitors to a website to upload files to 

3677 Google Cloud Storage without giving them direct write access. 

3678 See a [code sample](https://cloud.google.com/storage/docs/xml-api/post-object-forms#python). 

3679 

3680 :type expiration: datetime 

3681 :param expiration: (Optional) Expiration in UTC. If not specified, the 

3682 policy will expire in 1 hour. 

3683 

3684 :type conditions: list 

3685 :param conditions: A list of conditions as described in the 

3686 `policy documents` documentation. 

3687 

3688 :type client: :class:`~google.cloud.storage.client.Client` 

3689 :param client: (Optional) The client to use. If not passed, falls back 

3690 to the ``client`` stored on the current bucket. 

3691 

3692 :rtype: dict 

3693 :returns: A dictionary of (form field name, form field value) of form 

3694 fields that should be added to your HTML upload form in order 

3695 to attach the signature. 

3696 """ 

3697 client = self._require_client(client) 

3698 credentials = client._credentials 

3699 _signing.ensure_signed_credentials(credentials) 

3700 

3701 if expiration is None: 

3702 expiration = _NOW(_UTC).replace(tzinfo=None) + datetime.timedelta(hours=1) 

3703 

3704 conditions = conditions + [{"bucket": self.name}] 

3705 

3706 policy_document = { 

3707 "expiration": _datetime_to_rfc3339(expiration), 

3708 "conditions": conditions, 

3709 } 

3710 

3711 encoded_policy_document = base64.b64encode( 

3712 json.dumps(policy_document).encode("utf-8") 

3713 ) 

3714 signature = base64.b64encode(credentials.sign_bytes(encoded_policy_document)) 

3715 

3716 fields = { 

3717 "bucket": self.name, 

3718 "GoogleAccessId": credentials.signer_email, 

3719 "policy": encoded_policy_document.decode("utf-8"), 

3720 "signature": signature.decode("utf-8"), 

3721 } 

3722 

3723 return fields 

3724 

3725 def lock_retention_policy( 

3726 self, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY 

3727 ): 

3728 """Lock the bucket's retention policy. 

3729 

3730 :type client: :class:`~google.cloud.storage.client.Client` or 

3731 ``NoneType`` 

3732 :param client: (Optional) The client to use. If not passed, falls back 

3733 to the ``client`` stored on the blob's bucket. 

3734 

3735 :type timeout: float or tuple 

3736 :param timeout: 

3737 (Optional) The amount of time, in seconds, to wait 

3738 for the server response. See: :ref:`configuring_timeouts` 

3739 

3740 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

3741 :param retry: 

3742 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

3743 

3744 :raises ValueError: 

3745 if the bucket has no metageneration (i.e., new or never reloaded); 

3746 if the bucket has no retention policy assigned; 

3747 if the bucket's retention policy is already locked. 

3748 """ 

3749 with create_trace_span(name="Storage.Bucket.lockRetentionPolicy"): 

3750 if "metageneration" not in self._properties: 

3751 raise ValueError( 

3752 "Bucket has no retention policy assigned: try 'reload'?" 

3753 ) 

3754 

3755 policy = self._properties.get("retentionPolicy") 

3756 

3757 if policy is None: 

3758 raise ValueError( 

3759 "Bucket has no retention policy assigned: try 'reload'?" 

3760 ) 

3761 

3762 if policy.get("isLocked"): 

3763 raise ValueError("Bucket's retention policy is already locked.") 

3764 

3765 client = self._require_client(client) 

3766 

3767 query_params = {"ifMetagenerationMatch": self.metageneration} 

3768 

3769 if self.user_project is not None: 

3770 query_params["userProject"] = self.user_project 

3771 

3772 path = f"/b/{self.name}/lockRetentionPolicy" 

3773 api_response = client._post_resource( 

3774 path, 

3775 None, 

3776 query_params=query_params, 

3777 timeout=timeout, 

3778 retry=retry, 

3779 _target_object=self, 

3780 ) 

3781 self._set_properties(api_response) 

3782 

    def generate_signed_url(
        self,
        expiration=None,
        api_access_endpoint=None,
        method="GET",
        headers=None,
        query_parameters=None,
        client=None,
        credentials=None,
        version=None,
        virtual_hosted_style=False,
        bucket_bound_hostname=None,
        scheme="http",
    ):
        """Generates a signed URL for this bucket.

        .. note::

            If you are on Google Compute Engine, you can't generate a signed
            URL using GCE service account.  If you'd like to be able to generate
            a signed URL from GCE, you can use a standard service account from a
            JSON file rather than a GCE service account.

        If you have a bucket that you want to allow access to for a set
        amount of time, you can use this method to generate a URL that
        is only valid within a certain time period.

        If ``bucket_bound_hostname`` is set as an argument of :attr:`api_access_endpoint`,
        ``https`` works only if using a ``CDN``.

        :type expiration: Union[Integer, datetime.datetime, datetime.timedelta]
        :param expiration: Point in time when the signed URL should expire.  If
                           a ``datetime`` instance is passed without an explicit
                           ``tzinfo`` set, it will be assumed to be ``UTC``.

        :type api_access_endpoint: str
        :param api_access_endpoint: (Optional) URI base, for instance
            "https://storage.googleapis.com".  If not specified, the client's
            api_endpoint will be used.  Incompatible with bucket_bound_hostname.

        :type method: str
        :param method: The HTTP verb that will be used when requesting the URL.

        :type headers: dict
        :param headers:
            (Optional) Additional HTTP headers to be included as part of the
            signed URLs.  See:
            https://cloud.google.com/storage/docs/xml-api/reference-headers
            Requests using the signed URL *must* pass the specified header
            (name and value) with each request for the URL.

        :type query_parameters: dict
        :param query_parameters:
            (Optional) Additional query parameters to be included as part of the
            signed URLs.  See:
            https://cloud.google.com/storage/docs/xml-api/reference-headers#query

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use.  If not passed, falls back
                       to the ``client`` stored on the blob's bucket.

        :type credentials: :class:`google.auth.credentials.Credentials` or
                           :class:`NoneType`
        :param credentials: The authorization credentials to attach to requests.
                            These credentials identify this application to the service.
                            If none are specified, the client will attempt to ascertain
                            the credentials from the environment.

        :type version: str
        :param version: (Optional) The version of signed credential to create.
                        Must be one of 'v2' | 'v4'.  Defaults to 'v2'.

        :type virtual_hosted_style: bool
        :param virtual_hosted_style:
            (Optional) If true, then construct the URL relative the bucket's
            virtual hostname, e.g., '<bucket-name>.storage.googleapis.com'.
            Incompatible with bucket_bound_hostname.

        :type bucket_bound_hostname: str
        :param bucket_bound_hostname:
            (Optional) If passed, then construct the URL relative to the bucket-bound hostname.
            Value can be a bare or with scheme, e.g., 'example.com' or 'http://example.com'.
            Incompatible with api_access_endpoint and virtual_hosted_style.
            See: https://cloud.google.com/storage/docs/request-endpoints#cname

        :type scheme: str
        :param scheme:
            (Optional) If ``bucket_bound_hostname`` is passed as a bare hostname, use
            this value as the scheme.  ``https`` will work only when using a CDN.
            Defaults to ``"http"``.

        :raises: :exc:`ValueError` when version is invalid or mutually exclusive arguments are used.
        :raises: :exc:`TypeError` when expiration is not a valid type.
        :raises: :exc:`AttributeError` if credentials is not an instance
                 of :class:`google.auth.credentials.Signing`.

        :rtype: str
        :returns: A signed URL you can use to access the resource
                  until expiration.
        """
        # Default to the legacy V2 signing scheme; only 'v2'/'v4' are valid.
        if version is None:
            version = "v2"
        elif version not in ("v2", "v4"):
            raise ValueError("'version' must be either 'v2' or 'v4'")

        # bucket_bound_hostname is mutually exclusive with both explicit
        # endpoints and virtual-hosted-style URLs.
        if (
            api_access_endpoint is not None or virtual_hosted_style
        ) and bucket_bound_hostname:
            raise ValueError(
                "The bucket_bound_hostname argument is not compatible with "
                "either api_access_endpoint or virtual_hosted_style."
            )

        # No explicit endpoint: borrow the client's configured endpoint.
        if api_access_endpoint is None:
            client = self._require_client(client)
            api_access_endpoint = client.api_endpoint

        # If you are on Google Compute Engine, you can't generate a signed URL
        # using GCE service account.
        # See https://github.com/googleapis/google-auth-library-python/issues/50
        #
        # When the bucket is part of the hostname (virtual-hosted or
        # bucket-bound), the signed resource path is just "/"; otherwise the
        # bucket name is the resource path.
        if virtual_hosted_style:
            api_access_endpoint = _virtual_hosted_style_base_url(
                api_access_endpoint, self.name
            )
            resource = "/"
        elif bucket_bound_hostname:
            api_access_endpoint = _bucket_bound_hostname_url(
                bucket_bound_hostname, scheme
            )
            resource = "/"
        else:
            resource = f"/{self.name}"

        if credentials is None:
            client = self._require_client(client)  # May be redundant, but that's ok.
            credentials = client._credentials

        if version == "v2":
            helper = generate_signed_url_v2
        else:
            helper = generate_signed_url_v4

        return helper(
            credentials,
            resource=resource,
            expiration=expiration,
            api_access_endpoint=api_access_endpoint,
            method=method.upper(),
            headers=headers,
            query_parameters=query_parameters,
        )

3935 

3936 @property 

3937 def ip_filter(self): 

3938 """Retrieve or set the IP Filter configuration for this bucket. 

3939 

3940 See https://cloud.google.com/storage/docs/ip-filtering-overview and 

3941 https://cloud.google.com/storage/docs/json_api/v1/buckets#ipFilter 

3942 

3943 .. note:: 

3944 The getter for this property returns an 

3945 :class:`~google.cloud.storage.ip_filter.IPFilter` object, which is a 

3946 structured representation of the bucket's IP filter configuration. 

3947 Modifying the returned object has no effect. To update the bucket's 

3948 IP filter, create and assign a new ``IPFilter`` object to this 

3949 property and then call 

3950 :meth:`~google.cloud.storage.bucket.Bucket.patch`. 

3951 

3952 .. code-block:: python 

3953 

3954 from google.cloud.storage.ip_filter import ( 

3955 IPFilter, 

3956 PublicNetworkSource, 

3957 ) 

3958 

3959 ip_filter = IPFilter() 

3960 ip_filter.mode = "Enabled" 

3961 ip_filter.public_network_source = PublicNetworkSource( 

3962 allowed_ip_cidr_ranges=["203.0.113.5/32"] 

3963 ) 

3964 bucket.ip_filter = ip_filter 

3965 bucket.patch() 

3966 

3967 :setter: Set the IP Filter configuration for this bucket. 

3968 :getter: Gets the IP Filter configuration for this bucket. 

3969 

3970 :rtype: :class:`~google.cloud.storage.ip_filter.IPFilter` or ``NoneType`` 

3971 :returns: 

3972 An ``IPFilter`` object representing the configuration, or ``None`` 

3973 if no filter is configured. 

3974 """ 

3975 resource = self._properties.get(_IP_FILTER_PROPERTY) 

3976 if resource: 

3977 return IPFilter._from_api_resource(resource) 

3978 return None 

3979 

3980 @ip_filter.setter 

3981 def ip_filter(self, value): 

3982 if value is None: 

3983 self._patch_property(_IP_FILTER_PROPERTY, None) 

3984 elif isinstance(value, IPFilter): 

3985 self._patch_property(_IP_FILTER_PROPERTY, value._to_api_resource()) 

3986 else: 

3987 self._patch_property(_IP_FILTER_PROPERTY, value) 

3988 

3989 

class EncryptionEnforcementConfig(dict):
    """Mapping describing one encryption-type enforcement configuration.

    Instances are plain dicts whose keys mirror the JSON API resource, with
    convenience properties layered on top.

    :type restriction_mode: str
    :param restriction_mode:
        (Optional) Restriction mode for the encryption type.
        ``FullyRestricted`` means the bucket only accepts objects encrypted
        with the corresponding encryption type; ``NotRestricted`` means any
        encryption type is accepted.

    :raises ValueError:
        If ``restriction_mode`` is given but is not one of the allowed
        enforcement-mode constants.
    """

    def __init__(self, restriction_mode=None):
        initial = {}
        if restriction_mode is not None:
            # Reject anything other than the two documented modes up front.
            allowed = (
                ENFORCEMENT_MODE_FULLY_RESTRICTED,
                ENFORCEMENT_MODE_NOT_RESTRICTED,
            )
            if restriction_mode not in allowed:
                raise ValueError(
                    f"Invalid restriction_mode: {restriction_mode}. "
                    f"Must be one of {allowed}"
                )
            initial["restrictionMode"] = restriction_mode
        super().__init__(initial)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct instance from an API resource mapping.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`EncryptionEnforcementConfig`
        :returns: Instance created from resource.
        """
        config = cls()
        config.update(resource)
        return config

    @property
    def restriction_mode(self):
        """The restriction mode, or ``None`` if unset.

        :rtype: str or ``NoneType``
        """
        return self.get("restrictionMode")

    @restriction_mode.setter
    def restriction_mode(self, value):
        """Set the restriction mode.

        :type value: str
        :param value: The restriction mode.
        """
        self["restrictionMode"] = value

    @property
    def effective_time(self):
        """Point in time at which the configuration became effective.

        Output-only; populated by the back-end API.

        :rtype: datetime.datetime or ``NoneType``
        """
        raw = self.get("effectiveTime")
        if raw is None:
            return None
        return _rfc3339_nanos_to_datetime(raw)

4065 

class BucketEncryption(dict):
    """Mapping of a bucket's encryption configuration.

    Instances are plain dicts whose keys mirror the JSON API resource;
    setters write through to the owning bucket's pending patch.

    :type bucket: :class:`Bucket`
    :param bucket: Bucket for which this instance is the policy.

    :type default_kms_key_name: str
    :param default_kms_key_name:
        (Optional) Resource name of KMS key used to encrypt bucket's content.

    :type google_managed_encryption_enforcement_config: :class:`EncryptionEnforcementConfig`
    :param google_managed_encryption_enforcement_config:
        (Optional) Enforcement configuration for Google managed encryption.

    :type customer_managed_encryption_enforcement_config: :class:`EncryptionEnforcementConfig`
    :param customer_managed_encryption_enforcement_config:
        (Optional) Enforcement configuration for Customer managed encryption.

    :type customer_supplied_encryption_enforcement_config: :class:`EncryptionEnforcementConfig`
    :param customer_supplied_encryption_enforcement_config:
        (Optional) Enforcement configuration for Customer supplied encryption.
    """

    def __init__(
        self,
        bucket,
        default_kms_key_name=None,
        google_managed_encryption_enforcement_config=None,
        customer_managed_encryption_enforcement_config=None,
        customer_supplied_encryption_enforcement_config=None,
    ):
        initial = {}
        if default_kms_key_name is not None:
            initial["defaultKmsKeyName"] = default_kms_key_name

        # Only non-None configs are stored, preserving the API key names.
        optional_configs = (
            (
                "googleManagedEncryptionEnforcementConfig",
                google_managed_encryption_enforcement_config,
            ),
            (
                "customerManagedEncryptionEnforcementConfig",
                customer_managed_encryption_enforcement_config,
            ),
            (
                "customerSuppliedEncryptionEnforcementConfig",
                customer_supplied_encryption_enforcement_config,
            ),
        )
        for key, config in optional_configs:
            if config is not None:
                initial[key] = config

        super().__init__(initial)
        self._bucket = bucket

    @classmethod
    def from_api_repr(cls, resource, bucket):
        """Factory: construct instance from an API resource mapping.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :type bucket: :class:`Bucket`
        :param bucket: Bucket for which this instance is the policy.

        :rtype: :class:`BucketEncryption`
        :returns: Instance created from resource.
        """
        policy = cls(bucket)
        policy.update(resource)
        return policy

    @property
    def bucket(self):
        """Bucket for which this instance is the policy.

        :rtype: :class:`Bucket`
        :returns: the instance's bucket.
        """
        return self._bucket

    def _enforcement_config(self, key):
        # Shared getter: wrap the stored mapping for ``key`` in an
        # EncryptionEnforcementConfig, or return None when unset/empty.
        resource = self.get(key)
        if not resource:
            return None
        return EncryptionEnforcementConfig.from_api_repr(resource)

    def _set_enforcement_config(self, key, value):
        # Shared setter: store the value and mark the bucket's
        # ``encryption`` property as pending a patch.
        self[key] = value
        self.bucket._patch_property("encryption", self)

    @property
    def default_kms_key_name(self):
        """Default KMS encryption key for objects in the bucket.

        :rtype: str or ``NoneType``
        :returns: Default KMS encryption key, or ``None`` if not set.
        """
        return self.get("defaultKmsKeyName")

    @default_kms_key_name.setter
    def default_kms_key_name(self, value):
        """Set default KMS encryption key for objects in the bucket.

        :type value: str or None
        :param value: new KMS key name (None to clear any existing key).
        """
        self._set_enforcement_config("defaultKmsKeyName", value)

    @property
    def google_managed_encryption_enforcement_config(self):
        """Enforcement configuration for Google managed encryption.

        :rtype: :class:`EncryptionEnforcementConfig` or ``NoneType``
        """
        return self._enforcement_config("googleManagedEncryptionEnforcementConfig")

    @google_managed_encryption_enforcement_config.setter
    def google_managed_encryption_enforcement_config(self, value):
        """Set the enforcement configuration for Google managed encryption.

        :type value: :class:`EncryptionEnforcementConfig` or dict
        :param value: The configuration instance or dictionary.
        """
        self._set_enforcement_config(
            "googleManagedEncryptionEnforcementConfig", value
        )

    @property
    def customer_managed_encryption_enforcement_config(self):
        """Enforcement configuration for Customer managed encryption.

        :rtype: :class:`EncryptionEnforcementConfig` or ``NoneType``
        """
        return self._enforcement_config("customerManagedEncryptionEnforcementConfig")

    @customer_managed_encryption_enforcement_config.setter
    def customer_managed_encryption_enforcement_config(self, value):
        """Set the enforcement configuration for Customer managed encryption.

        :type value: :class:`EncryptionEnforcementConfig` or dict
        :param value: The configuration instance or dictionary.
        """
        self._set_enforcement_config(
            "customerManagedEncryptionEnforcementConfig", value
        )

    @property
    def customer_supplied_encryption_enforcement_config(self):
        """Enforcement configuration for Customer supplied encryption.

        :rtype: :class:`EncryptionEnforcementConfig` or ``NoneType``
        """
        return self._enforcement_config("customerSuppliedEncryptionEnforcementConfig")

    @customer_supplied_encryption_enforcement_config.setter
    def customer_supplied_encryption_enforcement_config(self, value):
        """Set the enforcement configuration for Customer supplied encryption.

        :type value: :class:`EncryptionEnforcementConfig` or dict
        :param value: The configuration instance or dictionary.
        """
        self._set_enforcement_config(
            "customerSuppliedEncryptionEnforcementConfig", value
        )

4230 

class SoftDeletePolicy(dict):
    """Map a bucket's soft delete policy.

    See https://cloud.google.com/storage/docs/soft-delete

    :type bucket: :class:`Bucket`
    :param bucket: Bucket for which this instance is the policy.

    :type retention_duration_seconds: int
    :param retention_duration_seconds:
        (Optional) The period of time in seconds that soft-deleted objects in
        the bucket will be retained and cannot be permanently deleted.

    :type effective_time: :class:`datetime.datetime`
    :param effective_time:
        (Optional) When the bucket's soft delete policy is effective.
        This value should normally only be set by the back-end API.
    """

    def __init__(self, bucket, **kw):
        # Mirror the API representation: the retention key is always present,
        # even when no value was supplied.
        resource = {"retentionDurationSeconds": kw.get("retention_duration_seconds")}

        effective = kw.get("effective_time")
        if effective is not None:
            resource["effectiveTime"] = _datetime_to_rfc3339(effective)

        super().__init__(resource)
        self._bucket = bucket

    @classmethod
    def from_api_repr(cls, resource, bucket):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :type bucket: :class:`Bucket`
        :param bucket: Bucket for which this instance is the policy.

        :rtype: :class:`SoftDeletePolicy`
        :returns: Instance created from resource.
        """
        policy = cls(bucket)
        policy.update(resource)
        return policy

    @property
    def bucket(self):
        """Bucket for which this instance is the policy.

        :rtype: :class:`Bucket`
        :returns: the instance's bucket.
        """
        return self._bucket

    @property
    def retention_duration_seconds(self):
        """Get the retention duration of the bucket's soft delete policy.

        :rtype: int or ``NoneType``
        :returns: The period of time in seconds that soft-deleted objects in
                  the bucket will be retained and cannot be permanently
                  deleted; Or ``None`` if the property is not set.
        """
        duration = self.get("retentionDurationSeconds")
        return None if duration is None else int(duration)

    @retention_duration_seconds.setter
    def retention_duration_seconds(self, value):
        """Set the retention duration of the bucket's soft delete policy.

        :type value: int
        :param value:
            The period of time in seconds that soft-deleted objects in the
            bucket will be retained and cannot be permanently deleted.
        """
        self["retentionDurationSeconds"] = value
        self.bucket._patch_property("softDeletePolicy", self)

    @property
    def effective_time(self):
        """Get the effective time of the bucket's soft delete policy.

        :rtype: datetime.datetime or ``NoneType``
        :returns: point-in-time at which the bucket's soft delete policy is
                  effective, or ``None`` if the property is not set.
        """
        timestamp = self.get("effectiveTime")
        return None if timestamp is None else _rfc3339_nanos_to_datetime(timestamp)

4325 

4326 

4327def _raise_if_len_differs(expected_len, **generation_match_args): 

4328 """ 

4329 Raise an error if any generation match argument 

4330 is set and its len differs from the given value. 

4331 

4332 :type expected_len: int 

4333 :param expected_len: Expected argument length in case it's set. 

4334 

4335 :type generation_match_args: dict 

4336 :param generation_match_args: Lists, which length must be checked. 

4337 

4338 :raises: :exc:`ValueError` if any argument set, but has an unexpected length. 

4339 """ 

4340 for name, value in generation_match_args.items(): 

4341 if value is not None and len(value) != expected_len: 

4342 raise ValueError(f"'{name}' length must be the same as 'blobs' length")