# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Create / interact with Google Cloud Storage buckets."""

import base64
import copy
import datetime
import json
from urllib.parse import urlsplit
import warnings

from google.api_core import datetime_helpers
from google.cloud._helpers import _datetime_to_rfc3339
from google.cloud._helpers import _rfc3339_nanos_to_datetime
from google.cloud.exceptions import NotFound
from google.api_core.iam import Policy
from google.cloud.storage import _signing
from google.cloud.storage._helpers import _add_etag_match_headers
from google.cloud.storage._helpers import _add_generation_match_parameters
from google.cloud.storage._helpers import _NOW
from google.cloud.storage._helpers import _PropertyMixin
from google.cloud.storage._helpers import _UTC
from google.cloud.storage._helpers import _scalar_property
from google.cloud.storage._helpers import _validate_name
from google.cloud.storage._signing import generate_signed_url_v2
from google.cloud.storage._signing import generate_signed_url_v4
from google.cloud.storage._helpers import _bucket_bound_hostname_url
from google.cloud.storage._helpers import _virtual_hosted_style_base_url
from google.cloud.storage._opentelemetry_tracing import create_trace_span
from google.cloud.storage.acl import BucketACL
from google.cloud.storage.acl import DefaultObjectACL
from google.cloud.storage.blob import Blob
from google.cloud.storage.constants import _DEFAULT_TIMEOUT
from google.cloud.storage.constants import ARCHIVE_STORAGE_CLASS
from google.cloud.storage.constants import COLDLINE_STORAGE_CLASS
from google.cloud.storage.constants import DUAL_REGION_LOCATION_TYPE
from google.cloud.storage.constants import (
    DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS,
)
from google.cloud.storage.constants import MULTI_REGIONAL_LEGACY_STORAGE_CLASS
from google.cloud.storage.constants import MULTI_REGION_LOCATION_TYPE
from google.cloud.storage.constants import NEARLINE_STORAGE_CLASS
from google.cloud.storage.constants import PUBLIC_ACCESS_PREVENTION_INHERITED
from google.cloud.storage.constants import REGIONAL_LEGACY_STORAGE_CLASS
from google.cloud.storage.constants import REGION_LOCATION_TYPE
from google.cloud.storage.constants import STANDARD_STORAGE_CLASS
from google.cloud.storage.ip_filter import IPFilter
from google.cloud.storage.notification import BucketNotification
from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT
from google.cloud.storage.retry import DEFAULT_RETRY
from google.cloud.storage.retry import DEFAULT_RETRY_IF_GENERATION_SPECIFIED
from google.cloud.storage.retry import DEFAULT_RETRY_IF_ETAG_IN_JSON
from google.cloud.storage.retry import DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED


_UBLA_BPO_ENABLED_MESSAGE = (
    "Pass only one of 'uniform_bucket_level_access_enabled' / "
    "'bucket_policy_only_enabled' to 'IAMConfiguration'."
)
_BPO_ENABLED_MESSAGE = (
    "'IAMConfiguration.bucket_policy_only_enabled' is deprecated. "
    "Instead, use 'IAMConfiguration.uniform_bucket_level_access_enabled'."
)
_UBLA_BPO_LOCK_TIME_MESSAGE = (
    "Pass only one of 'uniform_bucket_level_access_lock_time' / "
    "'bucket_policy_only_lock_time' to 'IAMConfiguration'."
)
_BPO_LOCK_TIME_MESSAGE = (
    "'IAMConfiguration.bucket_policy_only_lock_time' is deprecated. "
    "Instead, use 'IAMConfiguration.uniform_bucket_level_access_lock_time'."
)
_LOCATION_SETTER_MESSAGE = (
    "Assignment to 'Bucket.location' is deprecated, as it is only "
    "valid before the bucket is created. Instead, pass the location "
    "to `Bucket.create`."
)
_FROM_STRING_MESSAGE = (
    "Bucket.from_string() is deprecated. Use Bucket.from_uri() instead."
)
_IP_FILTER_PROPERTY = "ipFilter"



def _blobs_page_start(iterator, page, response):
    """Grab prefixes after a :class:`~google.api_core.page_iterator.Page` started.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type page: :class:`~google.api_core.page_iterator.Page`
    :param page: The page that was just created.

    :type response: dict
    :param response: The JSON API response for a page of blobs.
    """
    page.prefixes = tuple(response.get("prefixes", ()))
    iterator.prefixes.update(page.prefixes)


def _item_to_blob(iterator, item):
    """Convert a JSON blob to the native object.

    .. note::

        This assumes that the ``bucket`` attribute has been
        added to the iterator after being created.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that has retrieved the item.

    :type item: dict
    :param item: An item to be converted to a blob.

    :rtype: :class:`.Blob`
    :returns: The next blob in the page.
    """
    name = item.get("name")
    blob = Blob(name, bucket=iterator.bucket)
    blob._set_properties(item)
    return blob


def _item_to_notification(iterator, item):
    """Convert a JSON notification resource to the native object.

    .. note::

        This assumes that the ``bucket`` attribute has been
        added to the iterator after being created.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that has retrieved the item.

    :type item: dict
    :param item: An item to be converted to a notification.

    :rtype: :class:`.BucketNotification`
    :returns: The next notification being iterated.
    """
    return BucketNotification.from_api_repr(item, bucket=iterator.bucket)



class LifecycleRuleConditions(dict):
    """Map the conditions for a single lifecycle rule on a bucket.

    See: https://cloud.google.com/storage/docs/lifecycle

    :type age: int
    :param age: (Optional) Apply rule action to items whose age, in days,
                exceeds this value.

    :type created_before: datetime.date
    :param created_before: (Optional) Apply rule action to items created
                           before this date.

    :type is_live: bool
    :param is_live: (Optional) If true, apply rule action to non-versioned
                    items, or to items with no newer versions. If false, apply
                    rule action to versioned items with at least one newer
                    version.

    :type matches_prefix: list(str)
    :param matches_prefix: (Optional) Apply rule action to items whose names
                           begin with any of the given prefixes.

    :type matches_storage_class: list(str), one or more of
                                 :attr:`Bucket.STORAGE_CLASSES`.
    :param matches_storage_class: (Optional) Apply rule action to items
                                  whose storage class matches this value.

    :type matches_suffix: list(str)
    :param matches_suffix: (Optional) Apply rule action to items whose names
                           end with any of the given suffixes.

    :type number_of_newer_versions: int
    :param number_of_newer_versions: (Optional) Apply rule action to versioned
                                     items having N newer versions.

    :type days_since_custom_time: int
    :param days_since_custom_time: (Optional) Apply rule action to items whose number of days
                                   elapsed since the custom timestamp exceeds this value. The
                                   value must be a non-negative integer. If it's zero, the
                                   object becomes eligible for the lifecycle action as soon
                                   as its custom time is set.

    :type custom_time_before: :class:`datetime.date`
    :param custom_time_before: (Optional) Date object parsed from an RFC 3339 valid date,
                               e.g., 2019-03-16. Apply rule action to items whose custom
                               time is before this date.

    :type days_since_noncurrent_time: int
    :param days_since_noncurrent_time: (Optional) Apply rule action to items whose number of days
                                       elapsed since the noncurrent timestamp exceeds this value.
                                       This condition is relevant only for versioned objects. The
                                       value must be a non-negative integer. If it's zero, the
                                       object version becomes eligible for the lifecycle action
                                       as soon as it becomes noncurrent.

    :type noncurrent_time_before: :class:`datetime.date`
    :param noncurrent_time_before: (Optional) Date object parsed from an RFC 3339 valid date,
                                   e.g., 2019-03-16. Apply rule action to items whose
                                   noncurrent time is before this date. This condition is
                                   relevant only for versioned objects.

    :raises ValueError: if no arguments are passed.
    """

    def __init__(
        self,
        age=None,
        created_before=None,
        is_live=None,
        matches_storage_class=None,
        number_of_newer_versions=None,
        days_since_custom_time=None,
        custom_time_before=None,
        days_since_noncurrent_time=None,
        noncurrent_time_before=None,
        matches_prefix=None,
        matches_suffix=None,
        _factory=False,
    ):
        conditions = {}

        if age is not None:
            conditions["age"] = age

        if created_before is not None:
            conditions["createdBefore"] = created_before.isoformat()

        if is_live is not None:
            conditions["isLive"] = is_live

        if matches_storage_class is not None:
            conditions["matchesStorageClass"] = matches_storage_class

        if number_of_newer_versions is not None:
            conditions["numNewerVersions"] = number_of_newer_versions

        if days_since_custom_time is not None:
            conditions["daysSinceCustomTime"] = days_since_custom_time

        if custom_time_before is not None:
            conditions["customTimeBefore"] = custom_time_before.isoformat()

        if days_since_noncurrent_time is not None:
            conditions["daysSinceNoncurrentTime"] = days_since_noncurrent_time

        if noncurrent_time_before is not None:
            conditions["noncurrentTimeBefore"] = noncurrent_time_before.isoformat()

        if matches_prefix is not None:
            conditions["matchesPrefix"] = matches_prefix

        if matches_suffix is not None:
            conditions["matchesSuffix"] = matches_suffix

        if not _factory and not conditions:
            raise ValueError("Supply at least one condition")

        super(LifecycleRuleConditions, self).__init__(conditions)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleConditions`
        :returns: Instance created from resource.
        """
        instance = cls(_factory=True)
        instance.update(resource)
        return instance

    @property
    def age(self):
        """Condition's age value."""
        return self.get("age")

    @property
    def created_before(self):
        """Condition's created_before value."""
        before = self.get("createdBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)

    @property
    def is_live(self):
        """Condition's 'is_live' value."""
        return self.get("isLive")

    @property
    def matches_prefix(self):
        """Condition's 'matches_prefix' value."""
        return self.get("matchesPrefix")

    @property
    def matches_storage_class(self):
        """Condition's 'matches_storage_class' value."""
        return self.get("matchesStorageClass")

    @property
    def matches_suffix(self):
        """Condition's 'matches_suffix' value."""
        return self.get("matchesSuffix")

    @property
    def number_of_newer_versions(self):
        """Condition's 'number_of_newer_versions' value."""
        return self.get("numNewerVersions")

    @property
    def days_since_custom_time(self):
        """Condition's 'days_since_custom_time' value."""
        return self.get("daysSinceCustomTime")

    @property
    def custom_time_before(self):
        """Condition's 'custom_time_before' value."""
        before = self.get("customTimeBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)

    @property
    def days_since_noncurrent_time(self):
        """Condition's 'days_since_noncurrent_time' value."""
        return self.get("daysSinceNoncurrentTime")

    @property
    def noncurrent_time_before(self):
        """Condition's 'noncurrent_time_before' value."""
        before = self.get("noncurrentTimeBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)


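# Usage sketch for ``LifecycleRuleConditions`` (illustrative only; the lines
# below are not part of this module and simply restate what the constructor
# above does): keyword arguments serialize to the JSON API's camelCase keys.
#
#   conditions = LifecycleRuleConditions(age=30, matches_suffix=[".tmp"])
#   assert dict(conditions) == {"age": 30, "matchesSuffix": [".tmp"]}
#   assert conditions.age == 30

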

class LifecycleRuleDelete(dict):
    """Map a lifecycle rule deleting matching items.

    :type kw: dict
    :param kw: arguments passed to :class:`LifecycleRuleConditions`.
    """

    def __init__(self, **kw):
        conditions = LifecycleRuleConditions(**kw)
        rule = {"action": {"type": "Delete"}, "condition": dict(conditions)}
        super().__init__(rule)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleDelete`
        :returns: Instance created from resource.
        """
        instance = cls(_factory=True)
        instance.update(resource)
        return instance


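# Usage sketch for ``LifecycleRuleDelete`` (illustrative only): the rule is a
# plain mapping of action plus condition.
#
#   rule = LifecycleRuleDelete(age=365)
#   assert rule == {"action": {"type": "Delete"}, "condition": {"age": 365}}

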

class LifecycleRuleSetStorageClass(dict):
    """Map a lifecycle rule updating storage class of matching items.

    :type storage_class: str, one of :attr:`Bucket.STORAGE_CLASSES`.
    :param storage_class: new storage class to assign to matching items.

    :type kw: dict
    :param kw: arguments passed to :class:`LifecycleRuleConditions`.
    """

    def __init__(self, storage_class, **kw):
        conditions = LifecycleRuleConditions(**kw)
        rule = {
            "action": {"type": "SetStorageClass", "storageClass": storage_class},
            "condition": dict(conditions),
        }
        super().__init__(rule)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleSetStorageClass`
        :returns: Instance created from resource.
        """
        action = resource["action"]
        instance = cls(action["storageClass"], _factory=True)
        instance.update(resource)
        return instance


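# Usage sketch for ``LifecycleRuleSetStorageClass`` (illustrative only):
# transition objects older than 90 days to Coldline.
#
#   rule = LifecycleRuleSetStorageClass(COLDLINE_STORAGE_CLASS, age=90)
#   assert rule["action"] == {
#       "type": "SetStorageClass",
#       "storageClass": COLDLINE_STORAGE_CLASS,
#   }

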

class LifecycleRuleAbortIncompleteMultipartUpload(dict):
    """Map a rule aborting incomplete multipart uploads of matching items.

    The "age" lifecycle condition is the only supported condition for this rule.

    :type kw: dict
    :param kw: arguments passed to :class:`LifecycleRuleConditions`.
    """

    def __init__(self, **kw):
        conditions = LifecycleRuleConditions(**kw)
        rule = {
            "action": {"type": "AbortIncompleteMultipartUpload"},
            "condition": dict(conditions),
        }
        super().__init__(rule)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleAbortIncompleteMultipartUpload`
        :returns: Instance created from resource.
        """
        instance = cls(_factory=True)
        instance.update(resource)
        return instance


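# Usage sketch for ``LifecycleRuleAbortIncompleteMultipartUpload``
# (illustrative only): abort uploads still incomplete after 7 days. Per the
# class docstring, ``age`` is the only condition the backend accepts here.
#
#   rule = LifecycleRuleAbortIncompleteMultipartUpload(age=7)
#   assert rule["action"] == {"type": "AbortIncompleteMultipartUpload"}

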

_default = object()



class IAMConfiguration(dict):
    """Map a bucket's IAM configuration.

    :type bucket: :class:`Bucket`
    :param bucket: Bucket for which this instance is the policy.

    :type public_access_prevention: str
    :param public_access_prevention:
        (Optional) Whether the public access prevention policy is 'inherited' (default) or 'enforced'.
        See: https://cloud.google.com/storage/docs/public-access-prevention

    :type uniform_bucket_level_access_enabled: bool
    :param uniform_bucket_level_access_enabled:
        (Optional) Whether the IAM-only policy is enabled for the bucket.

    :type uniform_bucket_level_access_locked_time: :class:`datetime.datetime`
    :param uniform_bucket_level_access_locked_time:
        (Optional) When the bucket's IAM-only policy was enabled.
        This value should normally only be set by the back-end API.

    :type bucket_policy_only_enabled: bool
    :param bucket_policy_only_enabled:
        Deprecated alias for :data:`uniform_bucket_level_access_enabled`.

    :type bucket_policy_only_locked_time: :class:`datetime.datetime`
    :param bucket_policy_only_locked_time:
        Deprecated alias for :data:`uniform_bucket_level_access_locked_time`.
    """

    def __init__(
        self,
        bucket,
        public_access_prevention=_default,
        uniform_bucket_level_access_enabled=_default,
        uniform_bucket_level_access_locked_time=_default,
        bucket_policy_only_enabled=_default,
        bucket_policy_only_locked_time=_default,
    ):
        if bucket_policy_only_enabled is not _default:
            if uniform_bucket_level_access_enabled is not _default:
                raise ValueError(_UBLA_BPO_ENABLED_MESSAGE)

            warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2)
            uniform_bucket_level_access_enabled = bucket_policy_only_enabled

        if bucket_policy_only_locked_time is not _default:
            if uniform_bucket_level_access_locked_time is not _default:
                raise ValueError(_UBLA_BPO_LOCK_TIME_MESSAGE)

            warnings.warn(_BPO_LOCK_TIME_MESSAGE, DeprecationWarning, stacklevel=2)
            uniform_bucket_level_access_locked_time = bucket_policy_only_locked_time

        if uniform_bucket_level_access_enabled is _default:
            uniform_bucket_level_access_enabled = False

        if public_access_prevention is _default:
            public_access_prevention = PUBLIC_ACCESS_PREVENTION_INHERITED

        data = {
            "uniformBucketLevelAccess": {
                "enabled": uniform_bucket_level_access_enabled
            },
            "publicAccessPrevention": public_access_prevention,
        }
        if uniform_bucket_level_access_locked_time is not _default:
            data["uniformBucketLevelAccess"]["lockedTime"] = _datetime_to_rfc3339(
                uniform_bucket_level_access_locked_time
            )
        super(IAMConfiguration, self).__init__(data)
        self._bucket = bucket

    @classmethod
    def from_api_repr(cls, resource, bucket):
        """Factory: construct instance from resource.

        :type bucket: :class:`Bucket`
        :param bucket: Bucket for which this instance is the policy.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`IAMConfiguration`
        :returns: Instance created from resource.
        """
        instance = cls(bucket)
        instance.update(resource)
        return instance

    @property
    def bucket(self):
        """Bucket for which this instance is the policy.

        :rtype: :class:`Bucket`
        :returns: the instance's bucket.
        """
        return self._bucket

    @property
    def public_access_prevention(self):
        """Setting for public access prevention policy. Options are 'inherited' (default) or 'enforced'.

        See: https://cloud.google.com/storage/docs/public-access-prevention

        :rtype: string
        :returns: the public access prevention status, either 'enforced' or 'inherited'.
        """
        return self["publicAccessPrevention"]

    @public_access_prevention.setter
    def public_access_prevention(self, value):
        self["publicAccessPrevention"] = value
        self.bucket._patch_property("iamConfiguration", self)

    @property
    def uniform_bucket_level_access_enabled(self):
        """If set, access checks only use bucket-level IAM policies or above.

        :rtype: bool
        :returns: whether the bucket is configured to allow only IAM.
        """
        ubla = self.get("uniformBucketLevelAccess", {})
        return ubla.get("enabled", False)

    @uniform_bucket_level_access_enabled.setter
    def uniform_bucket_level_access_enabled(self, value):
        ubla = self.setdefault("uniformBucketLevelAccess", {})
        ubla["enabled"] = bool(value)
        self.bucket._patch_property("iamConfiguration", self)

    @property
    def uniform_bucket_level_access_locked_time(self):
        """Deadline for changing :attr:`uniform_bucket_level_access_enabled` from true to false.

        If the bucket's :attr:`uniform_bucket_level_access_enabled` is true, this property
        is the time after which that setting becomes immutable.

        If the bucket's :attr:`uniform_bucket_level_access_enabled` is false, this property
        is ``None``.

        :rtype: Union[:class:`datetime.datetime`, None]
        :returns: (readonly) Time after which :attr:`uniform_bucket_level_access_enabled` will
                  be frozen as true.
        """
        ubla = self.get("uniformBucketLevelAccess", {})
        stamp = ubla.get("lockedTime")
        if stamp is not None:
            stamp = _rfc3339_nanos_to_datetime(stamp)
        return stamp

    @property
    def bucket_policy_only_enabled(self):
        """Deprecated alias for :attr:`uniform_bucket_level_access_enabled`.

        :rtype: bool
        :returns: whether the bucket is configured to allow only IAM.
        """
        return self.uniform_bucket_level_access_enabled

    @bucket_policy_only_enabled.setter
    def bucket_policy_only_enabled(self, value):
        warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2)
        self.uniform_bucket_level_access_enabled = value

    @property
    def bucket_policy_only_locked_time(self):
        """Deprecated alias for :attr:`uniform_bucket_level_access_locked_time`.

        :rtype: Union[:class:`datetime.datetime`, None]
        :returns:
            (readonly) Time after which :attr:`bucket_policy_only_enabled` will
            be frozen as true.
        """
        return self.uniform_bucket_level_access_locked_time


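# Usage sketch for ``IAMConfiguration`` (illustrative only; assumes a loaded
# ``bucket`` exposing the ``iam_configuration`` property, which is defined on
# ``Bucket`` outside this excerpt): the setters stage a patch on the owning
# bucket rather than issuing a request themselves.
#
#   config = bucket.iam_configuration
#   config.uniform_bucket_level_access_enabled = True
#   bucket.patch()  # sends the staged "iamConfiguration" change

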

class Bucket(_PropertyMixin):
    """A class representing a Bucket on Cloud Storage.

    :type client: :class:`google.cloud.storage.client.Client`
    :param client: A client which holds credentials and project configuration
                   for the bucket (which requires a project).

    :type name: str
    :param name: The name of the bucket. Bucket names must start and end with a
                 number or letter.

    :type user_project: str
    :param user_project: (Optional) the project ID to be billed for API
                         requests made via this instance.

    :type generation: int
    :param generation: (Optional) If present, selects a specific revision of
                       this bucket.
    """

    _MAX_OBJECTS_FOR_ITERATION = 256
    """Maximum number of existing objects allowed in iteration.

    This is used in Bucket.delete() and Bucket.make_public().
    """

    STORAGE_CLASSES = (
        STANDARD_STORAGE_CLASS,
        NEARLINE_STORAGE_CLASS,
        COLDLINE_STORAGE_CLASS,
        ARCHIVE_STORAGE_CLASS,
        MULTI_REGIONAL_LEGACY_STORAGE_CLASS,  # legacy
        REGIONAL_LEGACY_STORAGE_CLASS,  # legacy
        DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS,  # legacy
    )
    """Allowed values for :attr:`storage_class`.

    Default value is :attr:`STANDARD_STORAGE_CLASS`.

    See
    https://cloud.google.com/storage/docs/json_api/v1/buckets#storageClass
    https://cloud.google.com/storage/docs/storage-classes
    """

    _LOCATION_TYPES = (
        MULTI_REGION_LOCATION_TYPE,
        REGION_LOCATION_TYPE,
        DUAL_REGION_LOCATION_TYPE,
    )
    """Allowed values for :attr:`location_type`."""

    def __init__(self, client, name=None, user_project=None, generation=None):
        """
        property :attr:`name`
            Get the bucket's name.
        """
        name = _validate_name(name)
        super(Bucket, self).__init__(name=name)
        self._client = client
        self._acl = BucketACL(self)
        self._default_object_acl = DefaultObjectACL(self)
        self._label_removals = set()
        self._user_project = user_project

        if generation is not None:
            self._properties["generation"] = generation

    def __repr__(self):
        return f"<Bucket: {self.name}>"

    @property
    def client(self):
        """The client bound to this bucket."""
        return self._client

    def _set_properties(self, value):
        """Set the properties for the current object.

        :type value: dict or :class:`google.cloud.storage.batch._FutureDict`
        :param value: The properties to be set.
        """
        self._label_removals.clear()
        return super(Bucket, self)._set_properties(value)

    @property
    def rpo(self):
        """Get the RPO (Recovery Point Objective) of this bucket.

        See: https://cloud.google.com/storage/docs/managing-turbo-replication

        :rtype: str
        :returns: "ASYNC_TURBO" or "DEFAULT"
        """
        return self._properties.get("rpo")

    @rpo.setter
    def rpo(self, value):
        """
        Set the RPO (Recovery Point Objective) of this bucket.

        See: https://cloud.google.com/storage/docs/managing-turbo-replication

        :type value: str
        :param value: "ASYNC_TURBO" or "DEFAULT"
        """
        self._patch_property("rpo", value)

    @property
    def user_project(self):
        """Project ID to be billed for API requests made via this bucket.

        If unset, API requests are billed to the bucket owner.

        A user project is required for all operations on Requester Pays buckets.

        See https://cloud.google.com/storage/docs/requester-pays#requirements for details.

        :rtype: str
        """
        return self._user_project

    @property
    def generation(self):
        """Retrieve the generation for the bucket.

        :rtype: int or ``NoneType``
        :returns: The generation of the bucket or ``None`` if the bucket's
                  resource has not been loaded from the server.
        """
        generation = self._properties.get("generation")
        if generation is not None:
            return int(generation)

    @property
    def soft_delete_time(self):
        """If this bucket has been soft-deleted, returns the time at which it became soft-deleted.

        :rtype: :class:`datetime.datetime` or ``NoneType``
        :returns:
            (readonly) The time that the bucket became soft-deleted.
            Note this property is only set for soft-deleted buckets.
        """
        soft_delete_time = self._properties.get("softDeleteTime")
        if soft_delete_time is not None:
            return _rfc3339_nanos_to_datetime(soft_delete_time)

    @property
    def hard_delete_time(self):
        """If this bucket has been soft-deleted, returns the time at which it will be permanently deleted.

        :rtype: :class:`datetime.datetime` or ``NoneType``
        :returns:
            (readonly) The time that the bucket will be permanently deleted.
            Note this property is only set for soft-deleted buckets.
        """
        hard_delete_time = self._properties.get("hardDeleteTime")
        if hard_delete_time is not None:
            return _rfc3339_nanos_to_datetime(hard_delete_time)

    @property
    def _query_params(self):
        """Default query parameters."""
        params = super()._query_params
        return params


    @classmethod
    def from_uri(cls, uri, client=None):
        """Construct a bucket object from a URI.

        .. code-block:: python

            from google.cloud import storage
            from google.cloud.storage.bucket import Bucket
            client = storage.Client()
            bucket = Bucket.from_uri("gs://bucket", client=client)

        :type uri: str
        :param uri: The bucket URI from which to construct the object.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. Application code should
                       *always* pass ``client``.

        :rtype: :class:`google.cloud.storage.bucket.Bucket`
        :returns: The bucket object created.
        """
        scheme, netloc, path, query, frag = urlsplit(uri)

        if scheme != "gs":
            raise ValueError("URI scheme must be gs")

        return cls(client, name=netloc)

    @classmethod
    def from_string(cls, uri, client=None):
        """Construct a bucket object from a URI.

        .. note::
            Deprecated alias for :meth:`from_uri`.

        .. code-block:: python

            from google.cloud import storage
            from google.cloud.storage.bucket import Bucket
            client = storage.Client()
            bucket = Bucket.from_string("gs://bucket", client=client)

        :type uri: str
        :param uri: The bucket URI from which to construct the object.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. Application code should
                       *always* pass ``client``.

        :rtype: :class:`google.cloud.storage.bucket.Bucket`
        :returns: The bucket object created.
        """
        warnings.warn(_FROM_STRING_MESSAGE, PendingDeprecationWarning, stacklevel=2)
        return Bucket.from_uri(uri=uri, client=client)


    def blob(
        self,
        blob_name,
        chunk_size=None,
        encryption_key=None,
        kms_key_name=None,
        generation=None,
    ):
        """Factory constructor for blob object.

        .. note::
            This will not make an HTTP request; it simply instantiates
            a blob object owned by this bucket.

        :type blob_name: str
        :param blob_name: The name of the blob to be instantiated.

        :type chunk_size: int
        :param chunk_size: The size of a chunk of data whenever iterating
                           (in bytes). This must be a multiple of 256 KB per
                           the API specification.

        :type encryption_key: bytes
        :param encryption_key:
            (Optional) 32 byte encryption key for customer-supplied encryption.

        :type kms_key_name: str
        :param kms_key_name:
            (Optional) Resource name of KMS key used to encrypt blob's content.

        :type generation: long
        :param generation: (Optional) If present, selects a specific revision of
                           this object.

        :rtype: :class:`google.cloud.storage.blob.Blob`
        :returns: The blob object created.
        """
        return Blob(
            name=blob_name,
            bucket=self,
            chunk_size=chunk_size,
            encryption_key=encryption_key,
            kms_key_name=kms_key_name,
            generation=generation,
        )

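    # Usage sketch for ``blob`` (illustrative only; assumes ``bucket`` is an
    # existing ``Bucket``): the factory is purely local, so no request is made
    # until the returned object is used.
    #
    #   blob = bucket.blob("path/to/object.txt", chunk_size=256 * 1024)
    #   blob.upload_from_string("hello")  # first network call happens here
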

    def notification(
        self,
        topic_name=None,
        topic_project=None,
        custom_attributes=None,
        event_types=None,
        blob_name_prefix=None,
        payload_format=NONE_PAYLOAD_FORMAT,
        notification_id=None,
    ):
        """Factory: create a notification resource for the bucket.

        See: :class:`.BucketNotification` for parameters.

        :rtype: :class:`.BucketNotification`
        """
        return BucketNotification(
            self,
            topic_name=topic_name,
            topic_project=topic_project,
            custom_attributes=custom_attributes,
            event_types=event_types,
            blob_name_prefix=blob_name_prefix,
            payload_format=payload_format,
            notification_id=notification_id,
        )

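    # Usage sketch for ``notification`` (illustrative only; ``topic_name`` is a
    # hypothetical Pub/Sub topic): like ``blob``, this is a local factory; call
    # ``create()`` on the result to issue the API request.
    #
    #   notification = bucket.notification(topic_name="my-topic")
    #   notification.create()
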

    def exists(
        self,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_etag_match=None,
        if_etag_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY,
    ):
        """Determines whether or not this bucket exists.

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_etag_match: Union[str, Set[str]]
        :param if_etag_match: (Optional) Make the operation conditional on whether the
                              bucket's current ETag matches the given value.

        :type if_etag_not_match: Union[str, Set[str]]
        :param if_etag_not_match: (Optional) Make the operation conditional on whether the
                                  bucket's current ETag does not match the given value.

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :rtype: bool
        :returns: True if the bucket exists in Cloud Storage.
        """
        with create_trace_span(name="Storage.Bucket.exists"):
            client = self._require_client(client)
            # We only need the status code (200 or not) so we seek to
            # minimize the returned payload.
            query_params = {"fields": "name"}

            if self.user_project is not None:
                query_params["userProject"] = self.user_project

            _add_generation_match_parameters(
                query_params,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
            )

            headers = {}
            _add_etag_match_headers(
                headers,
                if_etag_match=if_etag_match,
                if_etag_not_match=if_etag_not_match,
            )

            try:
                # We intentionally pass `_target_object=None` since fields=name
                # would limit the local properties.
                client._get_resource(
                    self.path,
                    query_params=query_params,
                    headers=headers,
                    timeout=timeout,
                    retry=retry,
                    _target_object=None,
                )
            except NotFound:
                # NOTE: This will not fail immediately in a batch. However, when
                # Batch.finish() is called, the resulting `NotFound` will be
                # raised.
                return False
            return True

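    # Usage sketch for ``exists`` (illustrative only; assumes a configured
    # ``client`` and a hypothetical bucket name): ``NotFound`` is swallowed, so
    # callers can branch without a try/except.
    #
    #   bucket = client.bucket("my-bucket")
    #   if not bucket.exists(timeout=10):
    #       bucket.create(location="US")
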

    def create(
        self,
        client=None,
        project=None,
        location=None,
        predefined_acl=None,
        predefined_default_object_acl=None,
        enable_object_retention=False,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
    ):
        """Creates current bucket.

        If the bucket already exists, will raise
        :class:`google.cloud.exceptions.Conflict`.

        This implements "storage.buckets.insert".

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type project: str
        :param project: (Optional) The project under which the bucket is to
                        be created. If not passed, uses the project set on
                        the client.
        :raises ValueError: if ``project`` is None and client's
                            :attr:`project` is also None.

        :type location: str
        :param location: (Optional) The location of the bucket. If not passed,
                         the default location, US, will be used. See
                         https://cloud.google.com/storage/docs/bucket-locations

        :type predefined_acl: str
        :param predefined_acl:
            (Optional) Name of predefined ACL to apply to bucket. See:
            https://cloud.google.com/storage/docs/access-control/lists#predefined-acl

        :type predefined_default_object_acl: str
        :param predefined_default_object_acl:
            (Optional) Name of predefined ACL to apply to bucket's objects. See:
            https://cloud.google.com/storage/docs/access-control/lists#predefined-acl

        :type enable_object_retention: bool
        :param enable_object_retention:
            (Optional) Whether object retention should be enabled on this bucket. See:
            https://cloud.google.com/storage/docs/object-lock

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
        """
        with create_trace_span(name="Storage.Bucket.create"):
            client = self._require_client(client)
            client.create_bucket(
                bucket_or_name=self,
                project=project,
                user_project=self.user_project,
                location=location,
                predefined_acl=predefined_acl,
                predefined_default_object_acl=predefined_default_object_acl,
                enable_object_retention=enable_object_retention,
                timeout=timeout,
                retry=retry,
            )

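    # Usage sketch for ``create`` (illustrative only): creation delegates to
    # ``client.create_bucket`` and raises ``Conflict`` if the name is taken.
    #
    #   from google.cloud.exceptions import Conflict
    #   try:
    #       bucket.create(location="EU", predefined_acl="private")
    #   except Conflict:
    #       pass  # bucket already exists
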

    def update(
        self,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
    ):
        """Sends all properties in a PUT request.

        Updates the ``_properties`` with the response from the backend.

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current object.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
        """
        with create_trace_span(name="Storage.Bucket.update"):
            super(Bucket, self).update(
                client=client,
                timeout=timeout,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                retry=retry,
            )


    def reload(
        self,
        client=None,
        projection="noAcl",
        timeout=_DEFAULT_TIMEOUT,
        if_etag_match=None,
        if_etag_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY,
        soft_deleted=None,
    ):
        """Reload properties from Cloud Storage.

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current object.

        :type projection: str
        :param projection: (Optional) If used, must be 'full' or 'noAcl'.
                           Defaults to ``'noAcl'``. Specifies the set of
                           properties to return.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_etag_match: Union[str, Set[str]]
        :param if_etag_match: (Optional) Make the operation conditional on whether the
                              bucket's current ETag matches the given value.

        :type if_etag_not_match: Union[str, Set[str]]
        :param if_etag_not_match: (Optional) Make the operation conditional on whether the
                                  bucket's current ETag does not match the given value.

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :type soft_deleted: bool
        :param soft_deleted: (Optional) If True, looks for a soft-deleted
            bucket. Will only return the bucket metadata if the bucket exists
            and is in a soft-deleted state. The bucket ``generation`` must be
            set if ``soft_deleted`` is set to True.
            See: https://cloud.google.com/storage/docs/soft-delete
        """
        with create_trace_span(name="Storage.Bucket.reload"):
            super(Bucket, self).reload(
                client=client,
                projection=projection,
                timeout=timeout,
                if_etag_match=if_etag_match,
                if_etag_not_match=if_etag_not_match,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                retry=retry,
                soft_deleted=soft_deleted,
            )


    def patch(
        self,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
    ):
        """Sends all changed properties in a PATCH request.

        Updates the ``_properties`` with the response from the backend.

        If :attr:`user_project` is set, bills the API request to that project.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current object.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
        """
        with create_trace_span(name="Storage.Bucket.patch"):
            # Special case: For buckets, it is possible that labels are being
            # removed; this requires special handling.
            if self._label_removals:
                self._changes.add("labels")
                self._properties.setdefault("labels", {})
                for removed_label in self._label_removals:
                    self._properties["labels"][removed_label] = None

            # Call the superclass method.
            super(Bucket, self).patch(
                client=client,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                timeout=timeout,
                retry=retry,
            )

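    # Usage sketch for the label-removal special case in ``patch`` above
    # (illustrative only; assumes the ``labels`` property defined on ``Bucket``
    # outside this excerpt, and a hypothetical label key "env"): a key deleted
    # locally is recorded in ``_label_removals`` and sent as an explicit
    # ``None`` so the backend drops it.
    #
    #   labels = bucket.labels
    #   del labels["env"]
    #   bucket.labels = labels  # records "env" for removal
    #   bucket.patch()          # request body includes {"labels": {"env": None}}
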

    @property
    def acl(self):
        """Create our ACL on demand."""
        return self._acl

    @property
    def default_object_acl(self):
        """Create our defaultObjectACL on demand."""
        return self._default_object_acl

    @staticmethod
    def path_helper(bucket_name):
        """Relative URL path for a bucket.

        :type bucket_name: str
        :param bucket_name: The bucket name in the path.

        :rtype: str
        :returns: The relative URL path for ``bucket_name``.
        """
        return "/b/" + bucket_name

    @property
    def path(self):
        """The URL path to this bucket."""
        if not self.name:
            raise ValueError("Cannot determine path without bucket name.")

        return self.path_helper(self.name)


    def get_blob(
        self,
        blob_name,
        client=None,
        encryption_key=None,
        generation=None,
        if_etag_match=None,
        if_etag_not_match=None,
        if_generation_match=None,
        if_generation_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
        soft_deleted=None,
        **kwargs,
    ):
        """Get a blob object by name.

        See a [code sample](https://cloud.google.com/storage/docs/samples/storage-get-metadata#storage_get_metadata-python)
        on how to retrieve metadata of an object.

        If :attr:`user_project` is set, bills the API request to that project.

        :type blob_name: str
        :param blob_name: The name of the blob to retrieve.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type encryption_key: bytes
        :param encryption_key:
            (Optional) 32 byte encryption key for customer-supplied encryption.
            See
            https://cloud.google.com/storage/docs/encryption#customer-supplied.

        :type generation: long
        :param generation:
            (Optional) If present, selects a specific revision of this object.

        :type if_etag_match: Union[str, Set[str]]
        :param if_etag_match:
            (Optional) See :ref:`using-if-etag-match`

        :type if_etag_not_match: Union[str, Set[str]]
        :param if_etag_not_match:
            (Optional) See :ref:`using-if-etag-not-match`

        :type if_generation_match: long
        :param if_generation_match:
            (Optional) See :ref:`using-if-generation-match`

        :type if_generation_not_match: long
        :param if_generation_not_match:
            (Optional) See :ref:`using-if-generation-not-match`

        :type if_metageneration_match: long
        :param if_metageneration_match:
            (Optional) See :ref:`using-if-metageneration-match`

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match:
            (Optional) See :ref:`using-if-metageneration-not-match`

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :type soft_deleted: bool
        :param soft_deleted:
            (Optional) If True, looks for a soft-deleted object. Will only return
            the object metadata if the object exists and is in a soft-deleted state.
            Object ``generation`` is required if ``soft_deleted`` is set to True.
            See: https://cloud.google.com/storage/docs/soft-delete

        :param kwargs: Keyword arguments to pass to the
                       :class:`~google.cloud.storage.blob.Blob` constructor.

        :rtype: :class:`google.cloud.storage.blob.Blob` or None
        :returns: The blob object if it exists, otherwise None.
        """
        with create_trace_span(name="Storage.Bucket.getBlob"):
            blob = Blob(
                bucket=self,
                name=blob_name,
                encryption_key=encryption_key,
                generation=generation,
                **kwargs,
            )
            try:
                # NOTE: This will not fail immediately in a batch. However, when
                # Batch.finish() is called, the resulting `NotFound` will be
                # raised.
                blob.reload(
                    client=client,
                    timeout=timeout,
                    if_etag_match=if_etag_match,
                    if_etag_not_match=if_etag_not_match,
                    if_generation_match=if_generation_match,
                    if_generation_not_match=if_generation_not_match,
                    if_metageneration_match=if_metageneration_match,
                    if_metageneration_not_match=if_metageneration_not_match,
                    retry=retry,
                    soft_deleted=soft_deleted,
                )
            except NotFound:
                return None
            else:
                return blob

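    # Usage sketch for ``get_blob`` (illustrative only; the object name is
    # hypothetical): a missing object yields ``None`` rather than an exception.
    #
    #   blob = bucket.get_blob("logs/2024-01-01.txt")
    #   if blob is not None:
    #       data = blob.download_as_bytes()
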

    def list_blobs(
        self,
        max_results=None,
        page_token=None,
        prefix=None,
        delimiter=None,
        start_offset=None,
        end_offset=None,
        include_trailing_delimiter=None,
        versions=None,
        projection="noAcl",
        fields=None,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        retry=DEFAULT_RETRY,
        match_glob=None,
        include_folders_as_prefixes=None,
        soft_deleted=None,
        page_size=None,
    ):
        """Return an iterator used to find blobs in the bucket.

        If :attr:`user_project` is set, bills the API request to that project.

        :type max_results: int
        :param max_results:
            (Optional) The maximum number of blobs to return.

        :type page_token: str
        :param page_token:
            (Optional) If present, return the next batch of blobs, using the
            value, which must correspond to the ``nextPageToken`` value
            returned in the previous response. Deprecated: use the ``pages``
            property of the returned iterator instead of manually passing the
            token.

        :type prefix: str
        :param prefix: (Optional) Prefix used to filter blobs.

        :type delimiter: str
        :param delimiter: (Optional) Delimiter, used with ``prefix`` to
                          emulate hierarchy.

        :type start_offset: str
        :param start_offset:
            (Optional) Filter results to objects whose names are
            lexicographically equal to or after ``startOffset``. If
            ``endOffset`` is also set, the objects listed will have names
            between ``startOffset`` (inclusive) and ``endOffset`` (exclusive).

        :type end_offset: str
        :param end_offset:
            (Optional) Filter results to objects whose names are
            lexicographically before ``endOffset``. If ``startOffset`` is also
            set, the objects listed will have names between ``startOffset``
            (inclusive) and ``endOffset`` (exclusive).

        :type include_trailing_delimiter: boolean
        :param include_trailing_delimiter:
            (Optional) If true, objects that end in exactly one instance of
            ``delimiter`` will have their metadata included in ``items`` in
            addition to ``prefixes``.

        :type versions: bool
        :param versions: (Optional) Whether object versions should be returned
                         as separate blobs.

        :type projection: str
        :param projection: (Optional) If used, must be 'full' or 'noAcl'.
                           Defaults to ``'noAcl'``. Specifies the set of
                           properties to return.

        :type fields: str
        :param fields:
            (Optional) Selector specifying which fields to include
            in a partial response. Must be a list of fields. For
            example to get a partial response with just the next
            page token and the name and language of each blob returned:
            ``'items(name,contentLanguage),nextPageToken'``.
            See: https://cloud.google.com/storage/docs/json_api/v1/parameters#fields

        :type client: :class:`~google.cloud.storage.client.Client`
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :type match_glob: str
        :param match_glob:
            (Optional) A glob pattern used to filter results (for example, foo*bar).
            The string value must be UTF-8 encoded. See:
            https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-object-glob

        :type include_folders_as_prefixes: bool
        :param include_folders_as_prefixes:
            (Optional) If true, includes Folders and Managed Folders in the set of
            ``prefixes`` returned by the query. Only applicable if ``delimiter`` is set to /.
            See: https://cloud.google.com/storage/docs/managed-folders

        :type soft_deleted: bool
        :param soft_deleted:
            (Optional) If true, only soft-deleted objects will be listed as distinct results in order of increasing
            generation number. This parameter can only be used successfully if the bucket has a soft delete policy.
            Note ``soft_deleted`` and ``versions`` cannot be set to True simultaneously. See:
            https://cloud.google.com/storage/docs/soft-delete

        :type page_size: int
        :param page_size:
            (Optional) Maximum number of blobs to return in each page.
            Defaults to a value set by the API.

        :rtype: :class:`~google.api_core.page_iterator.Iterator`
        :returns: Iterator of all :class:`~google.cloud.storage.blob.Blob`
                  in this bucket matching the arguments.
        """
        with create_trace_span(name="Storage.Bucket.listBlobs"):
            client = self._require_client(client)
            return client.list_blobs(
                self,
                max_results=max_results,
                page_token=page_token,
                prefix=prefix,
                delimiter=delimiter,
                start_offset=start_offset,
                end_offset=end_offset,
                include_trailing_delimiter=include_trailing_delimiter,
                versions=versions,
                projection=projection,
                fields=fields,
                page_size=page_size,
                timeout=timeout,
                retry=retry,
                match_glob=match_glob,
                include_folders_as_prefixes=include_folders_as_prefixes,
                soft_deleted=soft_deleted,
            )

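    # Usage sketch for ``list_blobs`` (illustrative only): with a delimiter,
    # "subdirectory" names surface on the iterator's ``prefixes`` set once the
    # pages have been consumed (see ``_blobs_page_start`` above).
    #
    #   iterator = bucket.list_blobs(prefix="images/", delimiter="/")
    #   names = [blob.name for blob in iterator]
    #   subdirs = iterator.prefixes
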

1537 def list_notifications( 

1538 self, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY 

1539 ): 

1540 """List Pub / Sub notifications for this bucket. 

1541 

1542 See: 

1543 https://cloud.google.com/storage/docs/json_api/v1/notifications/list 

1544 

1545 If :attr:`user_project` is set, bills the API request to that project. 

1546 

1547 :type client: :class:`~google.cloud.storage.client.Client` or 

1548 ``NoneType`` 

1549 :param client: (Optional) The client to use. If not passed, falls back 

1550 to the ``client`` stored on the current bucket. 

1551 :type timeout: float or tuple 

1552 :param timeout: 

1553 (Optional) The amount of time, in seconds, to wait 

1554 for the server response. See: :ref:`configuring_timeouts` 

1555 

1556 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1557 :param retry: 

1558 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

1559 

1560 :rtype: list of :class:`.BucketNotification` 

1561 :returns: notification instances 
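
Example (illustrative; assumes ``bucket`` is an existing :class:`Bucket` bound to an authenticated client):

>>> for notification in bucket.list_notifications():
...     print(notification.notification_id, notification.topic_name)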

1562 """ 

1563 with create_trace_span(name="Storage.Bucket.listNotifications"): 

1564 client = self._require_client(client) 

1565 path = self.path + "/notificationConfigs" 

1566 iterator = client._list_resource( 

1567 path, 

1568 _item_to_notification, 

1569 timeout=timeout, 

1570 retry=retry, 

1571 ) 

1572 iterator.bucket = self 

1573 return iterator 

1574 

1575 def get_notification( 

1576 self, 

1577 notification_id, 

1578 client=None, 

1579 timeout=_DEFAULT_TIMEOUT, 

1580 retry=DEFAULT_RETRY, 

1581 ): 

1582 """Get Pub / Sub notification for this bucket. 

1583 

1584 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/notifications/get) 

1585 and a [code sample](https://cloud.google.com/storage/docs/samples/storage-print-pubsub-bucket-notification#storage_print_pubsub_bucket_notification-python). 

1586 

1587 If :attr:`user_project` is set, bills the API request to that project. 

1588 

1589 :type notification_id: str 

1590 :param notification_id: The notification id to retrieve the notification configuration. 

1591 

1592 :type client: :class:`~google.cloud.storage.client.Client` or 

1593 ``NoneType`` 

1594 :param client: (Optional) The client to use. If not passed, falls back 

1595 to the ``client`` stored on the current bucket. 

1596 :type timeout: float or tuple 

1597 :param timeout: 

1598 (Optional) The amount of time, in seconds, to wait 

1599 for the server response. See: :ref:`configuring_timeouts` 

1600 

1601 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1602 :param retry: 

1603 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

1604 

1605 :rtype: :class:`.BucketNotification` 

1606 :returns: notification instance. 
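
Example (illustrative; the notification id ``"1"`` is a placeholder and ``bucket`` is assumed to be an existing :class:`Bucket`):

>>> notification = bucket.get_notification(notification_id="1")
>>> print(notification.topic_name)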

1607 """ 

1608 with create_trace_span(name="Storage.Bucket.getNotification"): 

1609 notification = self.notification(notification_id=notification_id) 

1610 notification.reload(client=client, timeout=timeout, retry=retry) 

1611 return notification 

1612 

1613 def delete( 

1614 self, 

1615 force=False, 

1616 client=None, 

1617 if_metageneration_match=None, 

1618 if_metageneration_not_match=None, 

1619 timeout=_DEFAULT_TIMEOUT, 

1620 retry=DEFAULT_RETRY, 

1621 ): 

1622 """Delete this bucket. 

1623 

1624 The bucket **must** be empty in order to submit a delete request. If 

1625 ``force=True`` is passed, this will first attempt to delete all the 

1626 objects / blobs in the bucket (i.e. try to empty the bucket). 

1627 

1628 If the bucket doesn't exist, this will raise 

1629 :class:`google.cloud.exceptions.NotFound`. If the bucket is not empty 

1630 (and ``force=False``), will raise :class:`google.cloud.exceptions.Conflict`. 

1631 

1632 If ``force=True`` and the bucket contains more than 256 objects / blobs 

1633 this will cowardly refuse to delete the objects (or the bucket). This 

1634 is to prevent accidental bucket deletion and to prevent extremely long 

1635 runtime of this method. Also note that ``force=True`` is not supported 

1636 in a ``Batch`` context. 

1637 

1638 If :attr:`user_project` is set, bills the API request to that project. 

1639 

1640 :type force: bool 

1641 :param force: If True, empties the bucket's objects then deletes it. 

1642 

1643 :type client: :class:`~google.cloud.storage.client.Client` or 

1644 ``NoneType`` 

1645 :param client: (Optional) The client to use. If not passed, falls back 

1646 to the ``client`` stored on the current bucket. 

1647 

1648 :type if_metageneration_match: long 

1649 :param if_metageneration_match: (Optional) Make the operation conditional on whether the 

1650 blob's current metageneration matches the given value. 

1651 

1652 :type if_metageneration_not_match: long 

1653 :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the 

1654 blob's current metageneration does not match the given value. 

1655 

1656 :type timeout: float or tuple 

1657 :param timeout: 

1658 (Optional) The amount of time, in seconds, to wait 

1659 for the server response. See: :ref:`configuring_timeouts` 

1660 

1661 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1662 :param retry: 

1663 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

1664 

1665 :raises: :class:`ValueError` if ``force`` is ``True`` and the bucket 

1666 contains more than 256 objects / blobs. 
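
Example (an illustrative sketch; assumes an authenticated ``client`` and that the hypothetical ``my-bucket`` holds fewer than 256 objects, so ``force=True`` is permitted):

>>> bucket = client.get_bucket("my-bucket")
>>> bucket.delete(force=True)  # empties the bucket, then deletes it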

1667 """ 

1668 with create_trace_span(name="Storage.Bucket.delete"): 

1669 client = self._require_client(client) 

1670 query_params = {} 

1671 

1672 if self.user_project is not None: 

1673 query_params["userProject"] = self.user_project 

1674 

1675 _add_generation_match_parameters( 

1676 query_params, 

1677 if_metageneration_match=if_metageneration_match, 

1678 if_metageneration_not_match=if_metageneration_not_match, 

1679 ) 

1680 if force: 

1681 blobs = list( 

1682 self.list_blobs( 

1683 max_results=self._MAX_OBJECTS_FOR_ITERATION + 1, 

1684 client=client, 

1685 timeout=timeout, 

1686 retry=retry, 

1687 versions=True, 

1688 ) 

1689 ) 

1690 if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION: 

1691 message = ( 

1692 "Refusing to delete bucket with more than " 

1693 "%d objects. If you actually want to delete " 

1694 "this bucket, please delete the objects " 

1695 "yourself before calling Bucket.delete()." 

1696 ) % (self._MAX_OBJECTS_FOR_ITERATION,) 

1697 raise ValueError(message) 

1698 

1699 # Ignore 404 errors on delete. 

1700 self.delete_blobs( 

1701 blobs, 

1702 on_error=lambda blob: None, 

1703 client=client, 

1704 timeout=timeout, 

1705 retry=retry, 

1706 preserve_generation=True, 

1707 ) 

1708 

1709 # We intentionally pass `_target_object=None` since a DELETE 

1710 # request has no response value (whether in a standard request or 

1711 # in a batch request). 

1712 client._delete_resource( 

1713 self.path, 

1714 query_params=query_params, 

1715 timeout=timeout, 

1716 retry=retry, 

1717 _target_object=None, 

1718 ) 

1719 

1720 def delete_blob( 

1721 self, 

1722 blob_name, 

1723 client=None, 

1724 generation=None, 

1725 if_generation_match=None, 

1726 if_generation_not_match=None, 

1727 if_metageneration_match=None, 

1728 if_metageneration_not_match=None, 

1729 timeout=_DEFAULT_TIMEOUT, 

1730 retry=DEFAULT_RETRY, 

1731 ): 

1732 """Deletes a blob from the current bucket. 

1733 

1734 If :attr:`user_project` is set, bills the API request to that project. 

1735 

1736 :type blob_name: str 

1737 :param blob_name: A blob name to delete. 

1738 

1739 :type client: :class:`~google.cloud.storage.client.Client` or 

1740 ``NoneType`` 

1741 :param client: (Optional) The client to use. If not passed, falls back 

1742 to the ``client`` stored on the current bucket. 

1743 

1744 :type generation: long 

1745 :param generation: (Optional) If present, permanently deletes a specific 

1746 revision of this object. 

1747 

1748 :type if_generation_match: long 

1749 :param if_generation_match: 

1750 (Optional) See :ref:`using-if-generation-match` 

1751 

1752 :type if_generation_not_match: long 

1753 :param if_generation_not_match: 

1754 (Optional) See :ref:`using-if-generation-not-match` 

1755 

1756 :type if_metageneration_match: long 

1757 :param if_metageneration_match: 

1758 (Optional) See :ref:`using-if-metageneration-match` 

1759 

1760 :type if_metageneration_not_match: long 

1761 :param if_metageneration_not_match: 

1762 (Optional) See :ref:`using-if-metageneration-not-match` 

1763 

1764 :type timeout: float or tuple 

1765 :param timeout: 

1766 (Optional) The amount of time, in seconds, to wait 

1767 for the server response. See: :ref:`configuring_timeouts` 

1768 

1769 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1770 :param retry: (Optional) How to retry the RPC. A None value will disable 

1771 retries. A google.api_core.retry.Retry value will enable retries, 

1772 and the object will define retriable response codes and errors and 

1773 configure backoff and timeout options. 

1774 

1775 A google.cloud.storage.retry.ConditionalRetryPolicy value wraps a 

1776 Retry object and activates it only if certain conditions are met. 

1777 This class exists to provide safe defaults for RPC calls that are 

1778 not technically safe to retry normally (due to potential data 

1779 duplication or other side-effects) but become safe to retry if a 

1780 condition such as if_generation_match is set. 

1781 

1782 See the retry.py source code and docstrings in this package 

1783 (google.cloud.storage.retry) for information on retry types and how 

1784 to configure them. 

1785 

1786 :raises: :class:`google.cloud.exceptions.NotFound` 

1787 if the blob isn't found. To suppress 

1788 the exception, use :meth:`delete_blobs`, passing a no-op 

1789 ``on_error`` callback. 
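
Example (illustrative; ``"my-file.txt"`` is a placeholder blob name and ``bucket`` an existing :class:`Bucket`):

>>> from google.cloud.exceptions import NotFound
>>> try:
...     bucket.delete_blob("my-file.txt")
... except NotFound:
...     pass  # the blob was already gone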

1790 """ 

1791 with create_trace_span(name="Storage.Bucket.deleteBlob"): 

1792 client = self._require_client(client) 

1793 blob = Blob(blob_name, bucket=self, generation=generation) 

1794 

1795 query_params = copy.deepcopy(blob._query_params) 

1796 _add_generation_match_parameters( 

1797 query_params, 

1798 if_generation_match=if_generation_match, 

1799 if_generation_not_match=if_generation_not_match, 

1800 if_metageneration_match=if_metageneration_match, 

1801 if_metageneration_not_match=if_metageneration_not_match, 

1802 ) 

1803 # We intentionally pass `_target_object=None` since a DELETE 

1804 # request has no response value (whether in a standard request or 

1805 # in a batch request). 

1806 client._delete_resource( 

1807 blob.path, 

1808 query_params=query_params, 

1809 timeout=timeout, 

1810 retry=retry, 

1811 _target_object=None, 

1812 ) 

1813 

1814 def delete_blobs( 

1815 self, 

1816 blobs, 

1817 on_error=None, 

1818 client=None, 

1819 preserve_generation=False, 

1820 timeout=_DEFAULT_TIMEOUT, 

1821 if_generation_match=None, 

1822 if_generation_not_match=None, 

1823 if_metageneration_match=None, 

1824 if_metageneration_not_match=None, 

1825 retry=DEFAULT_RETRY, 

1826 ): 

1827 """Deletes a list of blobs from the current bucket. 

1828 

1829 Uses :meth:`delete_blob` to delete each individual blob. 

1830 

1831 By default, any generation information in the list of blobs is ignored, and the 

1832 live versions of all blobs are deleted. Set `preserve_generation` to True 

1833 if blob generation should instead be propagated from the list of blobs. 

1834 

1835 If :attr:`user_project` is set, bills the API request to that project. 

1836 

1837 :type blobs: list 

1838 :param blobs: A list of :class:`~google.cloud.storage.blob.Blob`-s or 

1839 blob names to delete. 

1840 

1841 :type on_error: callable 

1842 :param on_error: (Optional) Takes single argument: ``blob``. 

1843 Called once for each blob raising 

1844 :class:`~google.cloud.exceptions.NotFound`; 

1845 otherwise, the exception is propagated. 

1846 Note that ``on_error`` is not supported in a ``Batch`` context. 

1847 

1848 :type client: :class:`~google.cloud.storage.client.Client` 

1849 :param client: (Optional) The client to use. If not passed, falls back 

1850 to the ``client`` stored on the current bucket. 

1851 

1852 :type preserve_generation: bool 

1853 :param preserve_generation: (Optional) Deletes only the generation specified on the blob object, 

1854 instead of the live version, if set to True. Only :class:`~google.cloud.storage.blob.Blob` 

1855 objects can have their generation set in this way. 

1856 Default: False. 

1857 

1858 :type if_generation_match: list of long 

1859 :param if_generation_match: 

1860 (Optional) See :ref:`using-if-generation-match` 

1861 Note that the length of the list must match the length of 

1862 ``blobs``; the list must match ``blobs`` item-to-item. 

1863 

1864 :type if_generation_not_match: list of long 

1865 :param if_generation_not_match: 

1866 (Optional) See :ref:`using-if-generation-not-match` 

1867 The list must match ``blobs`` item-to-item. 

1868 

1869 :type if_metageneration_match: list of long 

1870 :param if_metageneration_match: 

1871 (Optional) See :ref:`using-if-metageneration-match` 

1872 The list must match ``blobs`` item-to-item. 

1873 

1874 :type if_metageneration_not_match: list of long 

1875 :param if_metageneration_not_match: 

1876 (Optional) See :ref:`using-if-metageneration-not-match` 

1877 The list must match ``blobs`` item-to-item. 

1878 

1879 :type timeout: float or tuple 

1880 :param timeout: 

1881 (Optional) The amount of time, in seconds, to wait 

1882 for the server response. See: :ref:`configuring_timeouts` 

1883 

1884 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1885 :param retry: (Optional) How to retry the RPC. A None value will disable 

1886 retries. A google.api_core.retry.Retry value will enable retries, 

1887 and the object will define retriable response codes and errors and 

1888 configure backoff and timeout options. 

1889 

1890 A google.cloud.storage.retry.ConditionalRetryPolicy value wraps a 

1891 Retry object and activates it only if certain conditions are met. 

1892 This class exists to provide safe defaults for RPC calls that are 

1893 not technically safe to retry normally (due to potential data 

1894 duplication or other side-effects) but become safe to retry if a 

1895 condition such as if_generation_match is set. 

1896 

1897 See the retry.py source code and docstrings in this package 

1898 (google.cloud.storage.retry) for information on retry types and how 

1899 to configure them. 

1900 

1901 :raises: :class:`~google.cloud.exceptions.NotFound` (if 

1902 `on_error` is not passed). 
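
Example (illustrative; the blob names are placeholders and ``bucket`` an existing :class:`Bucket`):

>>> bucket.delete_blobs(
...     ["a.txt", "b.txt"],
...     on_error=lambda blob: None,  # ignore blobs that are already gone
... )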

1903 """ 

1904 with create_trace_span(name="Storage.Bucket.deleteBlobs"): 

1905 _raise_if_len_differs( 

1906 len(blobs), 

1907 if_generation_match=if_generation_match, 

1908 if_generation_not_match=if_generation_not_match, 

1909 if_metageneration_match=if_metageneration_match, 

1910 if_metageneration_not_match=if_metageneration_not_match, 

1911 ) 

1912 if_generation_match = iter(if_generation_match or []) 

1913 if_generation_not_match = iter(if_generation_not_match or []) 

1914 if_metageneration_match = iter(if_metageneration_match or []) 

1915 if_metageneration_not_match = iter(if_metageneration_not_match or []) 

1916 

1917 for blob in blobs: 

1918 try: 

1919 blob_name = blob 

1920 generation = None 

1921 if not isinstance(blob_name, str): 

1922 blob_name = blob.name 

1923 generation = blob.generation if preserve_generation else None 

1924 

1925 self.delete_blob( 

1926 blob_name, 

1927 client=client, 

1928 generation=generation, 

1929 if_generation_match=next(if_generation_match, None), 

1930 if_generation_not_match=next(if_generation_not_match, None), 

1931 if_metageneration_match=next(if_metageneration_match, None), 

1932 if_metageneration_not_match=next( 

1933 if_metageneration_not_match, None 

1934 ), 

1935 timeout=timeout, 

1936 retry=retry, 

1937 ) 

1938 except NotFound: 

1939 if on_error is not None: 

1940 on_error(blob) 

1941 else: 

1942 raise 

1943 

1944 def copy_blob( 

1945 self, 

1946 blob, 

1947 destination_bucket, 

1948 new_name=None, 

1949 client=None, 

1950 preserve_acl=True, 

1951 source_generation=None, 

1952 if_generation_match=None, 

1953 if_generation_not_match=None, 

1954 if_metageneration_match=None, 

1955 if_metageneration_not_match=None, 

1956 if_source_generation_match=None, 

1957 if_source_generation_not_match=None, 

1958 if_source_metageneration_match=None, 

1959 if_source_metageneration_not_match=None, 

1960 timeout=_DEFAULT_TIMEOUT, 

1961 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, 

1962 ): 

1963 """Copy the given blob to the given bucket, optionally with a new name. 

1964 

1965 If :attr:`user_project` is set, bills the API request to that project. 

1966 

1967 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/objects/copy) 

1968 and a [code sample](https://cloud.google.com/storage/docs/samples/storage-copy-file#storage_copy_file-python). 

1969 

1970 :type blob: :class:`google.cloud.storage.blob.Blob` 

1971 :param blob: The blob to be copied. 

1972 

1973 :type destination_bucket: :class:`google.cloud.storage.bucket.Bucket` 

1974 :param destination_bucket: The bucket into which the blob should be 

1975 copied. 

1976 

1977 :type new_name: str 

1978 :param new_name: (Optional) The new name for the copied file. 

1979 

1980 :type client: :class:`~google.cloud.storage.client.Client` or 

1981 ``NoneType`` 

1982 :param client: (Optional) The client to use. If not passed, falls back 

1983 to the ``client`` stored on the current bucket. 

1984 

1985 :type preserve_acl: bool 

1986 :param preserve_acl: DEPRECATED. This argument is not functional! 

1987 (Optional) Copies ACL from old blob to new blob. 

1988 Default: True. 

1989 Note that ``preserve_acl`` is not supported in a 

1990 ``Batch`` context. 

1991 

1992 :type source_generation: long 

1993 :param source_generation: (Optional) The generation of the blob to be 

1994 copied. 

1995 

1996 :type if_generation_match: long 

1997 :param if_generation_match: 

1998 (Optional) See :ref:`using-if-generation-match` 

1999 Note that the generation to be matched is that of the 

2000 ``destination`` blob. 

2001 

2002 :type if_generation_not_match: long 

2003 :param if_generation_not_match: 

2004 (Optional) See :ref:`using-if-generation-not-match` 

2005 Note that the generation to be matched is that of the 

2006 ``destination`` blob. 

2007 

2008 :type if_metageneration_match: long 

2009 :param if_metageneration_match: 

2010 (Optional) See :ref:`using-if-metageneration-match` 

2011 Note that the metageneration to be matched is that of the 

2012 ``destination`` blob. 

2013 

2014 :type if_metageneration_not_match: long 

2015 :param if_metageneration_not_match: 

2016 (Optional) See :ref:`using-if-metageneration-not-match` 

2017 Note that the metageneration to be matched is that of the 

2018 ``destination`` blob. 

2019 

2020 :type if_source_generation_match: long 

2021 :param if_source_generation_match: 

2022 (Optional) Makes the operation conditional on whether the source 

2023 object's generation matches the given value. 

2024 

2025 :type if_source_generation_not_match: long 

2026 :param if_source_generation_not_match: 

2027 (Optional) Makes the operation conditional on whether the source 

2028 object's generation does not match the given value. 

2029 

2030 :type if_source_metageneration_match: long 

2031 :param if_source_metageneration_match: 

2032 (Optional) Makes the operation conditional on whether the source 

2033 object's current metageneration matches the given value. 

2034 

2035 :type if_source_metageneration_not_match: long 

2036 :param if_source_metageneration_not_match: 

2037 (Optional) Makes the operation conditional on whether the source 

2038 object's current metageneration does not match the given value. 

2039 

2040 :type timeout: float or tuple 

2041 :param timeout: 

2042 (Optional) The amount of time, in seconds, to wait 

2043 for the server response. See: :ref:`configuring_timeouts` 

2044 

2045 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

2046 :param retry: 

2047 (Optional) How to retry the RPC. 

2048 The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, a conditional retry 

2049 policy which will only enable retries if ``if_generation_match`` or ``generation`` 

2050 is set, in order to ensure requests are idempotent before retrying them. 

2051 Change the value to ``DEFAULT_RETRY`` or another `google.api_core.retry.Retry` object 

2052 to enable retries regardless of generation precondition setting. 

2053 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout). 

2054 

2055 :rtype: :class:`google.cloud.storage.blob.Blob` 

2056 :returns: The new Blob. 
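
Example (an illustrative sketch; assumes an authenticated ``client``, and the bucket and blob names are placeholders):

>>> source_bucket = client.bucket("source-bucket")
>>> destination_bucket = client.bucket("destination-bucket")
>>> blob = source_bucket.blob("file.txt")
>>> new_blob = source_bucket.copy_blob(
...     blob,
...     destination_bucket,
...     "copied-file.txt",
...     if_generation_match=0,  # succeed only if the destination is new
... )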

2057 """ 

2058 with create_trace_span(name="Storage.Bucket.copyBlob"): 

2059 client = self._require_client(client) 

2060 query_params = {} 

2061 

2062 if self.user_project is not None: 

2063 query_params["userProject"] = self.user_project 

2064 

2065 if source_generation is not None: 

2066 query_params["sourceGeneration"] = source_generation 

2067 

2068 _add_generation_match_parameters( 

2069 query_params, 

2070 if_generation_match=if_generation_match, 

2071 if_generation_not_match=if_generation_not_match, 

2072 if_metageneration_match=if_metageneration_match, 

2073 if_metageneration_not_match=if_metageneration_not_match, 

2074 if_source_generation_match=if_source_generation_match, 

2075 if_source_generation_not_match=if_source_generation_not_match, 

2076 if_source_metageneration_match=if_source_metageneration_match, 

2077 if_source_metageneration_not_match=if_source_metageneration_not_match, 

2078 ) 

2079 

2080 if new_name is None: 

2081 new_name = blob.name 

2082 

2083 new_blob = Blob(bucket=destination_bucket, name=new_name) 

2084 api_path = blob.path + "/copyTo" + new_blob.path 

2085 copy_result = client._post_resource( 

2086 api_path, 

2087 None, 

2088 query_params=query_params, 

2089 timeout=timeout, 

2090 retry=retry, 

2091 _target_object=new_blob, 

2092 ) 

2093 

2094 if not preserve_acl: 

2095 new_blob.acl.save(acl={}, client=client, timeout=timeout) 

2096 

2097 new_blob._set_properties(copy_result) 

2098 return new_blob 

2099 

2100 def rename_blob( 

2101 self, 

2102 blob, 

2103 new_name, 

2104 client=None, 

2105 if_generation_match=None, 

2106 if_generation_not_match=None, 

2107 if_metageneration_match=None, 

2108 if_metageneration_not_match=None, 

2109 if_source_generation_match=None, 

2110 if_source_generation_not_match=None, 

2111 if_source_metageneration_match=None, 

2112 if_source_metageneration_not_match=None, 

2113 timeout=_DEFAULT_TIMEOUT, 

2114 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, 

2115 ): 

2116 """Rename the given blob using copy and delete operations. 

2117 

2118 If :attr:`user_project` is set, bills the API request to that project. 

2119 

2120 Effectively, copies blob to the same bucket with a new name, then 

2121 deletes the blob. 

2122 

2123 .. warning:: 

2124 

2125 This method will first duplicate the data and then delete the 

2126 old blob. This means that renaming very large objects 

2127 can be a (temporarily) costly or a very slow operation. 

2128 If you need more control over the copy and deletion, instead 

2129 use ``google.cloud.storage.blob.Blob.copy_to`` and 

2130 ``google.cloud.storage.blob.Blob.delete`` directly. 

2131 

2132 Also note that this method is not fully supported in a 

2133 ``Batch`` context. 

2134 

2135 :type blob: :class:`google.cloud.storage.blob.Blob` 

2136 :param blob: The blob to be renamed. 

2137 

2138 :type new_name: str 

2139 :param new_name: The new name for this blob. 

2140 

2141 :type client: :class:`~google.cloud.storage.client.Client` or 

2142 ``NoneType`` 

2143 :param client: (Optional) The client to use. If not passed, falls back 

2144 to the ``client`` stored on the current bucket. 

2145 

2146 :type if_generation_match: long 

2147 :param if_generation_match: 

2148 (Optional) See :ref:`using-if-generation-match` 

2149 Note that the generation to be matched is that of the 

2150 ``destination`` blob. 

2151 

2152 :type if_generation_not_match: long 

2153 :param if_generation_not_match: 

2154 (Optional) See :ref:`using-if-generation-not-match` 

2155 Note that the generation to be matched is that of the 

2156 ``destination`` blob. 

2157 

2158 :type if_metageneration_match: long 

2159 :param if_metageneration_match: 

2160 (Optional) See :ref:`using-if-metageneration-match` 

2161 Note that the metageneration to be matched is that of the 

2162 ``destination`` blob. 

2163 

2164 :type if_metageneration_not_match: long 

2165 :param if_metageneration_not_match: 

2166 (Optional) See :ref:`using-if-metageneration-not-match` 

2167 Note that the metageneration to be matched is that of the 

2168 ``destination`` blob. 

2169 

2170 :type if_source_generation_match: long 

2171 :param if_source_generation_match: 

2172 (Optional) Makes the operation conditional on whether the source 

2173 object's generation matches the given value. Also used in the 

2174 (implied) delete request. 

2175 

2176 :type if_source_generation_not_match: long 

2177 :param if_source_generation_not_match: 

2178 (Optional) Makes the operation conditional on whether the source 

2179 object's generation does not match the given value. Also used in 

2180 the (implied) delete request. 

2181 

2182 :type if_source_metageneration_match: long 

2183 :param if_source_metageneration_match: 

2184 (Optional) Makes the operation conditional on whether the source 

2185 object's current metageneration matches the given value. Also used 

2186 in the (implied) delete request. 

2187 

2188 :type if_source_metageneration_not_match: long 

2189 :param if_source_metageneration_not_match: 

2190 (Optional) Makes the operation conditional on whether the source 

2191 object's current metageneration does not match the given value. 

2192 Also used in the (implied) delete request. 

2193 

2194 :type timeout: float or tuple 

2195 :param timeout: 

2196 (Optional) The amount of time, in seconds, to wait 

2197 for the server response. See: :ref:`configuring_timeouts` 

2198 

2199 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

2200 :param retry: 

2201 (Optional) How to retry the RPC. 

2202 The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, a conditional retry 

2203 policy which will only enable retries if ``if_generation_match`` or ``generation`` 

2204 is set, in order to ensure requests are idempotent before retrying them. 

2205 Change the value to ``DEFAULT_RETRY`` or another `google.api_core.retry.Retry` object 

2206 to enable retries regardless of generation precondition setting. 

2207 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout). 

2208 

2209 :rtype: :class:`Blob` 

2210 :returns: The newly-renamed blob. 
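
Example (illustrative; the blob names are placeholders and ``bucket`` an existing :class:`Bucket`):

>>> blob = bucket.blob("old-name.txt")
>>> new_blob = bucket.rename_blob(blob, "new-name.txt")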

2211 """ 

2212 with create_trace_span(name="Storage.Bucket.renameBlob"): 

2213 same_name = blob.name == new_name 

2214 

2215 new_blob = self.copy_blob( 

2216 blob, 

2217 self, 

2218 new_name, 

2219 client=client, 

2220 timeout=timeout, 

2221 if_generation_match=if_generation_match, 

2222 if_generation_not_match=if_generation_not_match, 

2223 if_metageneration_match=if_metageneration_match, 

2224 if_metageneration_not_match=if_metageneration_not_match, 

2225 if_source_generation_match=if_source_generation_match, 

2226 if_source_generation_not_match=if_source_generation_not_match, 

2227 if_source_metageneration_match=if_source_metageneration_match, 

2228 if_source_metageneration_not_match=if_source_metageneration_not_match, 

2229 retry=retry, 

2230 ) 

2231 

2232 if not same_name: 

2233 blob.delete( 

2234 client=client, 

2235 timeout=timeout, 

2236 if_generation_match=if_source_generation_match, 

2237 if_generation_not_match=if_source_generation_not_match, 

2238 if_metageneration_match=if_source_metageneration_match, 

2239 if_metageneration_not_match=if_source_metageneration_not_match, 

2240 retry=retry, 

2241 ) 

2242 return new_blob 

2243 

2244 def move_blob( 

2245 self, 

2246 blob, 

2247 new_name, 

2248 client=None, 

2249 if_generation_match=None, 

2250 if_generation_not_match=None, 

2251 if_metageneration_match=None, 

2252 if_metageneration_not_match=None, 

2253 if_source_generation_match=None, 

2254 if_source_generation_not_match=None, 

2255 if_source_metageneration_match=None, 

2256 if_source_metageneration_not_match=None, 

2257 timeout=_DEFAULT_TIMEOUT, 

2258 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, 

2259 ): 

2260 """Move a blob to a new name atomically. 

2261 

2262 If :attr:`user_project` is set on the bucket, bills the API request to that project. 

2263 

2264 :type blob: :class:`google.cloud.storage.blob.Blob` 

2265 :param blob: The blob to be renamed. 

2266 

2267 :type new_name: str 

2268 :param new_name: The new name for this blob. 

2269 

2270 :type client: :class:`~google.cloud.storage.client.Client` or 

2271 ``NoneType`` 

2272 :param client: (Optional) The client to use. If not passed, falls back 

2273 to the ``client`` stored on the current bucket. 

2274 

2275 :type if_generation_match: int 

2276 :param if_generation_match: 

2277 (Optional) See :ref:`using-if-generation-match` 

2278 Note that the generation to be matched is that of the 

2279 ``destination`` blob. 

2280 

2281 :type if_generation_not_match: int 

2282 :param if_generation_not_match: 

2283 (Optional) See :ref:`using-if-generation-not-match` 

2284 Note that the generation to be matched is that of the 

2285 ``destination`` blob. 

2286 

2287 :type if_metageneration_match: int 

2288 :param if_metageneration_match: 

2289 (Optional) See :ref:`using-if-metageneration-match` 

2290 Note that the metageneration to be matched is that of the 

2291 ``destination`` blob. 

2292 

2293 :type if_metageneration_not_match: int 

2294 :param if_metageneration_not_match: 

2295 (Optional) See :ref:`using-if-metageneration-not-match` 

2296 Note that the metageneration to be matched is that of the 

2297 ``destination`` blob. 

2298 

2299 :type if_source_generation_match: int 

2300 :param if_source_generation_match: 

2301 (Optional) Makes the operation conditional on whether the source 

2302 object's generation matches the given value. 

2303 

2304 :type if_source_generation_not_match: int 

2305 :param if_source_generation_not_match: 

2306 (Optional) Makes the operation conditional on whether the source 

2307 object's generation does not match the given value. 

2308 

2309 :type if_source_metageneration_match: int 

2310 :param if_source_metageneration_match: 

2311 (Optional) Makes the operation conditional on whether the source 

2312 object's current metageneration matches the given value. 

2313 

2314 :type if_source_metageneration_not_match: int 

2315 :param if_source_metageneration_not_match: 

2316 (Optional) Makes the operation conditional on whether the source 

2317 object's current metageneration does not match the given value. 

2318 

2319 :type timeout: float or tuple 

2320 :param timeout: 

2321 (Optional) The amount of time, in seconds, to wait 

2322 for the server response. See: :ref:`configuring_timeouts` 

2323 

2324 :type retry: google.api_core.retry.Retry 

2325 :param retry: 

2326 (Optional) How to retry the RPC. 

2327 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout). 

2328 

2329 :rtype: :class:`Blob` 

2330 :returns: The newly-moved blob. 
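
Example (illustrative; the blob names are placeholders and ``bucket`` an existing :class:`Bucket`):

>>> blob = bucket.blob("old-name.txt")
>>> new_blob = bucket.move_blob(
...     blob, "new-name.txt", if_generation_match=0
... )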

2331 """ 

2332 with create_trace_span(name="Storage.Bucket.moveBlob"): 

2333 client = self._require_client(client) 

2334 query_params = {} 

2335 

2336 if self.user_project is not None: 

2337 query_params["userProject"] = self.user_project 

2338 

2339 _add_generation_match_parameters( 

2340 query_params, 

2341 if_generation_match=if_generation_match, 

2342 if_generation_not_match=if_generation_not_match, 

2343 if_metageneration_match=if_metageneration_match, 

2344 if_metageneration_not_match=if_metageneration_not_match, 

2345 if_source_generation_match=if_source_generation_match, 

2346 if_source_generation_not_match=if_source_generation_not_match, 

2347 if_source_metageneration_match=if_source_metageneration_match, 

2348 if_source_metageneration_not_match=if_source_metageneration_not_match, 

2349 ) 

2350 

2351 new_blob = Blob(bucket=self, name=new_name) 

2352 api_path = blob.path + "/moveTo/o/" + new_blob.name 

2353 move_result = client._post_resource( 

2354 api_path, 

2355 None, 

2356 query_params=query_params, 

2357 timeout=timeout, 

2358 retry=retry, 

2359 _target_object=new_blob, 

2360 ) 

2361 

2362 new_blob._set_properties(move_result) 

2363 return new_blob 

2364 

2365 def restore_blob( 

2366 self, 

2367 blob_name, 

2368 client=None, 

2369 generation=None, 

2370 copy_source_acl=None, 

2371 projection=None, 

2372 if_generation_match=None, 

2373 if_generation_not_match=None, 

2374 if_metageneration_match=None, 

2375 if_metageneration_not_match=None, 

2376 timeout=_DEFAULT_TIMEOUT, 

2377 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, 

2378 ): 

2379 """Restores a soft-deleted object. 

2380 

2381 If :attr:`user_project` is set on the bucket, bills the API request to that project. 

2382 

2383 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/objects/restore) 

2384 

2385 :type blob_name: str 

2386 :param blob_name: The name of the blob to be restored. 

2387 

2388 :type client: :class:`~google.cloud.storage.client.Client` 

2389 :param client: (Optional) The client to use. If not passed, falls back 

2390 to the ``client`` stored on the current bucket. 

2391 

2392 :type generation: int 

2393 :param generation: Selects the specific revision of the object. 

2394 

2395 :type copy_source_acl: bool 

2396 :param copy_source_acl: (Optional) If true, copy the soft-deleted object's access controls. 

2397 

2398 :type projection: str 

2399 :param projection: (Optional) Specifies the set of properties to return. 

2400 If used, must be 'full' or 'noAcl'. 

2401 

2402 :type if_generation_match: long 

2403 :param if_generation_match: 

2404 (Optional) See :ref:`using-if-generation-match` 

2405 

2406 :type if_generation_not_match: long 

2407 :param if_generation_not_match: 

2408 (Optional) See :ref:`using-if-generation-not-match` 

2409 

2410 :type if_metageneration_match: long 

2411 :param if_metageneration_match: 

2412 (Optional) See :ref:`using-if-metageneration-match` 

2413 

2414 :type if_metageneration_not_match: long 

2415 :param if_metageneration_not_match: 

2416 (Optional) See :ref:`using-if-metageneration-not-match` 

2417 

2418 :type timeout: float or tuple 

2419 :param timeout: 

2420 (Optional) The amount of time, in seconds, to wait 

2421 for the server response. See: :ref:`configuring_timeouts` 

2422 

2423 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

2424 :param retry: 

2425 (Optional) How to retry the RPC. 

2426 The default value is ``DEFAULT_RETRY_IF_GENERATION_SPECIFIED``, under which 

2427 only restore operations with ``if_generation_match`` or ``generation`` set 

2428 will be retried. 

2429 

2430 Users can configure non-default retry behavior. A ``None`` value will 

2431 disable retries. A ``DEFAULT_RETRY`` value will enable retries 

2432 even if restore operations are not guaranteed to be idempotent. 

2433 See [Configuring Retries](https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout). 

2434 

2435 :rtype: :class:`google.cloud.storage.blob.Blob` 

2436 :returns: The restored Blob. 
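
Example (illustrative; the name and generation are placeholders for a soft-deleted object in a bucket with a soft delete policy):

>>> blob = bucket.restore_blob("deleted-file.txt", generation=123456)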

2437 """ 

2438 with create_trace_span(name="Storage.Bucket.restore_blob"): 

2439 client = self._require_client(client) 

2440 query_params = {} 

2441 

2442 if self.user_project is not None: 

2443 query_params["userProject"] = self.user_project 

2444 if generation is not None: 

2445 query_params["generation"] = generation 

2446 if copy_source_acl is not None: 

2447 query_params["copySourceAcl"] = copy_source_acl 

2448 if projection is not None: 

2449 query_params["projection"] = projection 

2450 

2451 _add_generation_match_parameters( 

2452 query_params, 

2453 if_generation_match=if_generation_match, 

2454 if_generation_not_match=if_generation_not_match, 

2455 if_metageneration_match=if_metageneration_match, 

2456 if_metageneration_not_match=if_metageneration_not_match, 

2457 ) 

2458 

2459 blob = Blob(bucket=self, name=blob_name) 

2460 api_response = client._post_resource( 

2461 f"{blob.path}/restore", 

2462 None, 

2463 query_params=query_params, 

2464 timeout=timeout, 

2465 retry=retry, 

2466 ) 

2467 blob._set_properties(api_response) 

2468 return blob 

2469 

2470 @property 

2471 def cors(self): 

2472 """Retrieve or set CORS policies configured for this bucket. 

2473 

2474 See http://www.w3.org/TR/cors/ and 

2475 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2476 

2477 .. note:: 

2478 

2479 The getter for this property returns a list which contains 

2480 *copies* of the bucket's CORS policy mappings. Mutating the list 

2481 or one of its dicts has no effect unless you then re-assign the 

2482 dict via the setter. E.g.: 

2483 

2484 >>> policies = bucket.cors 

2485 >>> policies.append({'origin': '/foo', ...}) 

2486 >>> policies[1]['maxAgeSeconds'] = 3600 

2487 >>> del policies[0] 

2488 >>> bucket.cors = policies 

2489 >>> bucket.update() 

2490 

2491 :setter: Set CORS policies for this bucket. 

2492 :getter: Gets the CORS policies for this bucket. 

2493 

2494 :rtype: list of dictionaries 

2495 :returns: A sequence of mappings describing each CORS policy. 

2496 """ 

2497 return [copy.deepcopy(policy) for policy in self._properties.get("cors", ())] 

2498 

2499 @cors.setter 

2500 def cors(self, entries): 

2501 """Set CORS policies configured for this bucket. 

2502 

2503 See http://www.w3.org/TR/cors/ and 

2504 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2505 

2506 :type entries: list of dictionaries 

2507 :param entries: A sequence of mappings describing each CORS policy. 

2508 """ 

2509 self._patch_property("cors", entries) 

2510 

2511 default_event_based_hold = _scalar_property("defaultEventBasedHold") 

2512 """Are uploaded objects automatically placed under an even-based hold? 

2513 

2514 If True, uploaded objects will be placed under an event-based hold to 

2515 be released at a future time. When released an object will then begin 

2516 the retention period determined by the policy retention period for the 

2517 object bucket. 

2518 

2519 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2520 

2521 If the property is not set locally, returns ``None``. 

2522 

2523 :rtype: bool or ``NoneType`` 

2524 """ 

2525 

2526 @property 

2527 def default_kms_key_name(self): 

2528 """Retrieve / set default KMS encryption key for objects in the bucket. 

2529 

2530 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2531 

2532 :setter: Set default KMS encryption key for items in this bucket. 

2533 :getter: Get default KMS encryption key for items in this bucket. 

2534 

2535 :rtype: str 

2536 :returns: Default KMS encryption key, or ``None`` if not set. 
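
Example (illustrative; the key resource name is a placeholder and ``bucket`` an existing :class:`Bucket`):

>>> bucket.default_kms_key_name = (
...     "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key"
... )
>>> bucket.patch()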

2537 """ 

2538 encryption_config = self._properties.get("encryption", {}) 

2539 return encryption_config.get("defaultKmsKeyName") 

2540 

2541 @default_kms_key_name.setter 

2542 def default_kms_key_name(self, value): 

2543 """Set default KMS encryption key for objects in the bucket. 

2544 

2545 :type value: str or None 

2546 :param value: new KMS key name (None to clear any existing key). 

2547 """ 

2548 encryption_config = self._properties.get("encryption", {}) 

2549 encryption_config["defaultKmsKeyName"] = value 

2550 self._patch_property("encryption", encryption_config) 

2551 

2552 @property 

2553 def labels(self): 

2554 """Retrieve or set labels assigned to this bucket. 

2555 

2556 See 

2557 https://cloud.google.com/storage/docs/json_api/v1/buckets#labels 

2558 

2559 .. note:: 

2560 

2561 The getter for this property returns a dict which is a *copy* 

2562 of the bucket's labels. Mutating that dict has no effect unless 

2563 you then re-assign the dict via the setter. E.g.: 

2564 

2565 >>> labels = bucket.labels 

2566 >>> labels['new_key'] = 'some-label' 

2567 >>> del labels['old_key'] 

2568 >>> bucket.labels = labels 

2569 >>> bucket.update() 

2570 

2571 :setter: Set labels for this bucket. 

2572 :getter: Gets the labels for this bucket. 

2573 

2574 :rtype: :class:`dict` 

2575 :returns: Name-value pairs (string->string) labelling the bucket. 

2576 """ 

2577 labels = self._properties.get("labels") 

2578 if labels is None: 

2579 return {} 

2580 return copy.deepcopy(labels) 

2581 

2582 @labels.setter 

2583 def labels(self, mapping): 

2584 """Set labels assigned to this bucket. 

2585 

2586 See 

2587 https://cloud.google.com/storage/docs/json_api/v1/buckets#labels 

2588 

2589 :type mapping: :class:`dict` 

2590 :param mapping: Name-value pairs (string->string) labelling the bucket. 

2591 """ 

2592 # If any labels have been expressly removed, we need to track this 

2593 # so that a future .patch() call can do the correct thing. 

2594 existing = set(self.labels.keys()) 

2595 incoming = set(mapping.keys()) 

2596 self._label_removals = self._label_removals.union(existing.difference(incoming)) 

2597 mapping = {k: str(v) for k, v in mapping.items()} 

2598 

2599 # Actually update the labels on the object. 

2600 self._patch_property("labels", copy.deepcopy(mapping)) 

2601 

2602 @property 

2603 def etag(self): 

2604 """Retrieve the ETag for the bucket. 

2605 

2606 See https://tools.ietf.org/html/rfc2616#section-3.11 and 

2607 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2608 

2609 :rtype: str or ``NoneType`` 

2610 :returns: The bucket etag or ``None`` if the bucket's 

2611 resource has not been loaded from the server. 

2612 """ 

2613 return self._properties.get("etag") 

2614 

2615 @property 

2616 def id(self): 

2617 """Retrieve the ID for the bucket. 

2618 

2619 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2620 

2621 :rtype: str or ``NoneType`` 

2622 :returns: The ID of the bucket or ``None`` if the bucket's 

2623 resource has not been loaded from the server. 

2624 """ 

2625 return self._properties.get("id") 

2626 

2627 @property 

2628 def iam_configuration(self): 

2629 """Retrieve IAM configuration for this bucket. 

2630 

2631 :rtype: :class:`IAMConfiguration` 

2632 :returns: an instance for managing the bucket's IAM configuration. 

2633 """ 

2634 info = self._properties.get("iamConfiguration", {}) 

2635 return IAMConfiguration.from_api_repr(info, self) 

2636 

2637 @property 

2638 def soft_delete_policy(self): 

2639 """Retrieve the soft delete policy for this bucket. 

2640 

2641 See https://cloud.google.com/storage/docs/soft-delete 

2642 

2643 :rtype: :class:`SoftDeletePolicy` 

2644 :returns: an instance for managing the bucket's soft delete policy. 

2645 """ 

2646 policy = self._properties.get("softDeletePolicy", {}) 

2647 return SoftDeletePolicy.from_api_repr(policy, self) 

2648 

2649 @property 

2650 def lifecycle_rules(self): 

2651 """Retrieve or set lifecycle rules configured for this bucket. 

2652 

2653 See https://cloud.google.com/storage/docs/lifecycle and 

2654 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2655 

2656 .. note:: 

2657 

2658 The getter for this property returns a generator which yields 

2659 *copies* of the bucket's lifecycle rules mappings. Mutating the 

2660 output dicts has no effect unless you then re-assign the dict via 

2661 the setter. E.g.: 

2662 

2663 >>> rules = list(bucket.lifecycle_rules) 

2664 >>> rules.append({'action': {'type': 'Delete'}, 'condition': {'age': 365}}) 

2665 >>> rules[1]['action']['type'] = 'Delete' 

2666 >>> del rules[0] 

2667 >>> bucket.lifecycle_rules = rules 

2668 >>> bucket.update() 

2669 

2670 :setter: Set lifecycle rules for this bucket. 

2671 :getter: Gets the lifecycle rules for this bucket. 

2672 

2673 :rtype: generator(dict) 

2674 :returns: A sequence of mappings describing each lifecycle rule. 

2675 """ 

2676 info = self._properties.get("lifecycle", {}) 

2677 for rule in info.get("rule", ()): 

2678 action_type = rule["action"]["type"] 

2679 if action_type == "Delete": 

2680 yield LifecycleRuleDelete.from_api_repr(rule) 

2681 elif action_type == "SetStorageClass": 

2682 yield LifecycleRuleSetStorageClass.from_api_repr(rule) 

2683 elif action_type == "AbortIncompleteMultipartUpload": 

2684 yield LifecycleRuleAbortIncompleteMultipartUpload.from_api_repr(rule) 

2685 else: 

2686 warnings.warn( 

2687 "Unknown lifecycle rule type received: {}. Please upgrade to the latest version of google-cloud-storage.".format( 

2688 rule 

2689 ), 

2690 UserWarning, 

2691 stacklevel=1, 

2692 ) 

2693 

2694 @lifecycle_rules.setter 

2695 def lifecycle_rules(self, rules): 

2696 """Set lifecycle rules configured for this bucket. 

2697 

2698 See https://cloud.google.com/storage/docs/lifecycle and 

2699 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2700 

2701 :type rules: list of dictionaries 

2702 :param rules: A sequence of mappings describing each lifecycle rule. 

2703 """ 

2704 rules = [dict(rule) for rule in rules] # Convert helpers if needed 

2705 self._patch_property("lifecycle", {"rule": rules}) 

2706 

2707 def clear_lifecycle_rules(self): 

2708 """Clear lifecycle rules configured for this bucket. 

2709 

2710 See https://cloud.google.com/storage/docs/lifecycle and 

2711 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2712 """ 

2713 self.lifecycle_rules = [] 

2714 

2715 def clear_lifecyle_rules(self): 

2716 """Deprecated alias for clear_lifecycle_rules.""" 

2717 return self.clear_lifecycle_rules() 

2718 

2719 def add_lifecycle_delete_rule(self, **kw): 

2720 """Add a "delete" rule to lifecycle rules configured for this bucket. 

2721 

2722 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle), 

2723 which is set on the bucket. For the general format of a lifecycle configuration, see the 

2724 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets). 

2725 See also a [code sample](https://cloud.google.com/storage/docs/samples/storage-enable-bucket-lifecycle-management#storage_enable_bucket_lifecycle_management-python). 

2726 

2727 :type kw: dict 

2728 :param kw: arguments passed to :class:`LifecycleRuleConditions`. 
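
Example (illustrative; the 365-day ``age`` condition is a placeholder and ``bucket`` an existing :class:`Bucket`):

>>> bucket.add_lifecycle_delete_rule(age=365)
>>> bucket.patch()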

2729 """ 

2730 rules = list(self.lifecycle_rules) 

2731 rules.append(LifecycleRuleDelete(**kw)) 

2732 self.lifecycle_rules = rules 

2733 

2734 def add_lifecycle_set_storage_class_rule(self, storage_class, **kw): 

2735 """Add a "set storage class" rule to lifecycle rules. 

2736 

2737 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle), 

2738 which is set on the bucket. For the general format of a lifecycle configuration, see the 

2739 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets). 

2740 

2741 :type storage_class: str, one of :attr:`STORAGE_CLASSES`. 

2742 :param storage_class: new storage class to assign to matching items. 

2743 

2744 :type kw: dict 

2745 :param kw: arguments passed to :class:`LifecycleRuleConditions`. 

2746 """ 

2747 rules = list(self.lifecycle_rules) 

2748 rules.append(LifecycleRuleSetStorageClass(storage_class, **kw)) 

2749 self.lifecycle_rules = rules 

2750 

2751 def add_lifecycle_abort_incomplete_multipart_upload_rule(self, **kw): 

2752 """Add a "abort incomplete multipart upload" rule to lifecycle rules. 

2753 

2754 .. note:: 

2755 The "age" lifecycle condition is the only supported condition 

2756 for this rule. 

2757 

2758 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle), 

2759 which is set on the bucket. For the general format of a lifecycle configuration, see the 

2760 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets). 

2761 

2762 :type kw: dict 

2763 :param kw: arguments passed to :class:`LifecycleRuleConditions`. 

2764 """ 

2765 rules = list(self.lifecycle_rules) 

2766 rules.append(LifecycleRuleAbortIncompleteMultipartUpload(**kw)) 

2767 self.lifecycle_rules = rules 

2768 

2769 _location = _scalar_property("location") 

2770 

2771 @property 

2772 def location(self): 

2773 """Retrieve location configured for this bucket. 

2774 

2775 See https://cloud.google.com/storage/docs/json_api/v1/buckets and 

2776 https://cloud.google.com/storage/docs/locations 

2777 

2778 Returns ``None`` if the property has not been set before creation, 

2779 or if the bucket's resource has not been loaded from the server. 

2780 :rtype: str or ``NoneType`` 

2781 """ 

2782 return self._location 

2783 

2784 @location.setter 

2785 def location(self, value): 

2786 """(Deprecated) Set `Bucket.location` 

2787 

2788 This can only be set at bucket **creation** time. 

2789 

2790 See https://cloud.google.com/storage/docs/json_api/v1/buckets and 

2791 https://cloud.google.com/storage/docs/bucket-locations 

2792 

2793 .. warning:: 

2794 

2795 Assignment to 'Bucket.location' is deprecated, as it is only 

2796 valid before the bucket is created. Instead, pass the location 

2797 to `Bucket.create`. 

2798 """ 

2799 warnings.warn(_LOCATION_SETTER_MESSAGE, DeprecationWarning, stacklevel=2) 

2800 self._location = value 

2801 

2802 @property 

2803 def data_locations(self): 

2804 """Retrieve the list of regional locations for custom dual-region buckets. 

2805 

2806 See https://cloud.google.com/storage/docs/json_api/v1/buckets and 

2807 https://cloud.google.com/storage/docs/locations 

2808 

2809 Returns ``None`` if the property has not been set before creation, 

2810 if the bucket's resource has not been loaded from the server, 

2811 or if the bucket is not a dual-region bucket. 

2812 :rtype: list of str or ``NoneType`` 

2813 """ 

2814 custom_placement_config = self._properties.get("customPlacementConfig", {}) 

2815 return custom_placement_config.get("dataLocations") 

2816 

2817 @property 

2818 def location_type(self): 

2819 """Retrieve the location type for the bucket. 

2820 

2821 See https://cloud.google.com/storage/docs/storage-classes 

2822 

2823 :getter: Gets the location type for this bucket. 

2824 

2825 :rtype: str or ``NoneType`` 

2826 :returns: 

2827 If set, one of 

2828 :attr:`~google.cloud.storage.constants.MULTI_REGION_LOCATION_TYPE`, 

2829 :attr:`~google.cloud.storage.constants.REGION_LOCATION_TYPE`, or 

2830 :attr:`~google.cloud.storage.constants.DUAL_REGION_LOCATION_TYPE`, 

2831 else ``None``. 

2832 """ 

2833 return self._properties.get("locationType") 

2834 

2835 def get_logging(self): 

2836 """Return info about access logging for this bucket. 

2837 

2838 See https://cloud.google.com/storage/docs/access-logs#status 

2839 

2840 :rtype: dict or None 

2841 :returns: a dict with keys ``logBucket`` and ``logObjectPrefix`` 

2842 (if logging is enabled), or None (if not). 

2843 """ 

2844 info = self._properties.get("logging") 

2845 return copy.deepcopy(info) 

2846 

2847 def enable_logging(self, bucket_name, object_prefix=""): 

2848 """Enable access logging for this bucket. 

2849 

2850 See https://cloud.google.com/storage/docs/access-logs 

2851 

2852 :type bucket_name: str 

2853 :param bucket_name: name of bucket in which to store access logs 

2854 

2855 :type object_prefix: str 

2856 :param object_prefix: prefix for access log filenames 
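
Example (illustrative; the log bucket name and prefix are placeholders and ``bucket`` an existing :class:`Bucket`):

>>> bucket.enable_logging("my-log-bucket", object_prefix="access-logs/")
>>> bucket.patch()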

2857 """ 

2858 info = {"logBucket": bucket_name, "logObjectPrefix": object_prefix} 

2859 self._patch_property("logging", info) 

2860 

2861 def disable_logging(self): 

2862 """Disable access logging for this bucket. 

2863 

2864 See https://cloud.google.com/storage/docs/access-logs#disabling 

2865 """ 

2866 self._patch_property("logging", None) 

2867 

2868 @property 

2869 def metageneration(self): 

2870 """Retrieve the metageneration for the bucket. 

2871 

2872 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2873 

2874 :rtype: int or ``NoneType`` 

2875 :returns: The metageneration of the bucket or ``None`` if the bucket's 

2876 resource has not been loaded from the server. 

2877 """ 

2878 metageneration = self._properties.get("metageneration") 

2879 if metageneration is not None: 

2880 return int(metageneration) 

2881 

2882 @property 

2883 def owner(self): 

2884 """Retrieve info about the owner of the bucket. 

2885 

2886 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2887 

2888 :rtype: dict or ``NoneType`` 

2889 :returns: Mapping of owner's role/ID. Returns ``None`` if the bucket's 

2890 resource has not been loaded from the server. 

2891 """ 

2892 return copy.deepcopy(self._properties.get("owner")) 

2893 

2894 @property 

2895 def project_number(self): 

2896 """Retrieve the number of the project to which the bucket is assigned. 

2897 

2898 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2899 

2900 :rtype: int or ``NoneType`` 

2901 :returns: The project number that owns the bucket or ``None`` if 

2902 the bucket's resource has not been loaded from the server. 

2903 """ 

2904 project_number = self._properties.get("projectNumber") 

2905 if project_number is not None: 

2906 return int(project_number) 

2907 

2908 @property 

2909 def retention_policy_effective_time(self): 

2910 """Retrieve the effective time of the bucket's retention policy. 

2911 

2912 :rtype: datetime.datetime or ``NoneType`` 

2913 :returns: the point in time at which the bucket's retention policy is 

2914 effective, or ``None`` if the property is not 

2915 set locally. 

2916 """ 

2917 policy = self._properties.get("retentionPolicy") 

2918 if policy is not None: 

2919 timestamp = policy.get("effectiveTime") 

2920 if timestamp is not None: 

2921 return _rfc3339_nanos_to_datetime(timestamp) 

2922 

2923 @property 

2924 def retention_policy_locked(self): 

2925 """Retrieve whthere the bucket's retention policy is locked. 

2926 

2927 :rtype: bool 

2928 :returns: True if the bucket's policy is locked, or else False 

2929 if the policy is not locked, or the property is not 

2930 set locally. 

2931 """ 

2932 policy = self._properties.get("retentionPolicy") 

2933 if policy is not None: 

2934 return policy.get("isLocked") 

2935 

2936 @property 

2937 def retention_period(self): 

2938 """Retrieve or set the retention period for items in the bucket. 

2939 

2940 :rtype: int or ``NoneType`` 

2941 :returns: number of seconds to retain items after upload or release 

2942 from event-based lock, or ``None`` if the property is not 

2943 set locally. 

2944 """ 

2945 policy = self._properties.get("retentionPolicy") 

2946 if policy is not None: 

2947 period = policy.get("retentionPeriod") 

2948 if period is not None: 

2949 return int(period) 

2950 

2951 @retention_period.setter 

2952 def retention_period(self, value): 

2953 """Set the retention period for items in the bucket. 

2954 

2955 :type value: int 

2956 :param value: 

2957 number of seconds to retain items after upload or release from 

2958 event-based lock. 

2959 

2960 :raises ValueError: if the bucket's retention policy is locked. 
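
Example (illustrative; one day of retention is a placeholder value and ``bucket`` an existing :class:`Bucket`):

>>> bucket.retention_period = 24 * 60 * 60  # seconds
>>> bucket.patch()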

2961 """ 

2962 policy = self._properties.setdefault("retentionPolicy", {}) 

2963 if value is not None: 

2964 policy["retentionPeriod"] = str(value) 

2965 else: 

2966 policy = None 

2967 self._patch_property("retentionPolicy", policy) 

2968 

2969 @property 

2970 def self_link(self): 

2971 """Retrieve the URI for the bucket. 

2972 

2973 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2974 

2975 :rtype: str or ``NoneType`` 

2976 :returns: The self link for the bucket or ``None`` if 

2977 the bucket's resource has not been loaded from the server. 

2978 """ 

2979 return self._properties.get("selfLink") 

2980 

2981 @property 

2982 def storage_class(self): 

2983 """Retrieve or set the storage class for the bucket. 

2984 

2985 See https://cloud.google.com/storage/docs/storage-classes 

2986 

2987 :setter: Set the storage class for this bucket. 

2988 :getter: Gets the storage class for this bucket. 

2989 

2990 :rtype: str or ``NoneType`` 

2991 :returns: 

2992 If set, one of 

2993 :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`, 

2994 :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`, 

2995 :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`, 

2996 :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`, 

2997 :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`, 

2998 :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`, 

2999 or 

3000 :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`, 

3001 else ``None``. 

3002 """ 

3003 return self._properties.get("storageClass") 

3004 

3005 @storage_class.setter 

3006 def storage_class(self, value): 

3007 """Set the storage class for the bucket. 

3008 

3009 See https://cloud.google.com/storage/docs/storage-classes 

3010 

3011 :type value: str 

3012 :param value: 

3013 One of 

3014 :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`, 

3015 :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`, 

3016 :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`, 

3017 :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`, 

3018 :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`, 

3019 :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`, 

3020 or 

3021 :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`. 

3022 """ 

3023 self._patch_property("storageClass", value) 
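# Sketch: switching the bucket's default storage class (assumes an
# authenticated ``client`` and an existing, hypothetical bucket):
#
#   from google.cloud.storage.constants import NEARLINE_STORAGE_CLASS
#
#   bucket = client.get_bucket("my-bucket")
#   bucket.storage_class = NEARLINE_STORAGE_CLASS
#   bucket.patch()  # existing objects keep their current storage class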

3024 

3025 @property 

3026 def time_created(self): 

3027 """Retrieve the timestamp at which the bucket was created. 

3028 

3029 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

3030 

3031 :rtype: :class:`datetime.datetime` or ``NoneType`` 

3032 :returns: Datetime object parsed from an RFC3339-valid timestamp, or 

3033 ``None`` if the bucket's resource has not been loaded 

3034 from the server. 

3035 """ 

3036 value = self._properties.get("timeCreated") 

3037 if value is not None: 

3038 return _rfc3339_nanos_to_datetime(value) 

3039 

3040 @property 

3041 def updated(self): 

3042 """Retrieve the timestamp at which the bucket was last updated. 

3043 

3044 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

3045 

3046 :rtype: :class:`datetime.datetime` or ``NoneType`` 

3047 :returns: Datetime object parsed from an RFC3339-valid timestamp, or 

3048 ``None`` if the bucket's resource has not been loaded 

3049 from the server. 

3050 """ 

3051 value = self._properties.get("updated") 

3052 if value is not None: 

3053 return _rfc3339_nanos_to_datetime(value) 

3054 

3055 @property 

3056 def versioning_enabled(self): 

3057 """Is versioning enabled for this bucket? 

3058 

3059 See https://cloud.google.com/storage/docs/object-versioning for 

3060 details. 

3061 

3062 :setter: Update whether versioning is enabled for this bucket. 

3063 :getter: Query whether versioning is enabled for this bucket. 

3064 

3065 :rtype: bool 

3066 :returns: True if enabled, else False. 

3067 """ 

3068 versioning = self._properties.get("versioning", {}) 

3069 return versioning.get("enabled", False) 

3070 

3071 @versioning_enabled.setter 

3072 def versioning_enabled(self, value): 

3073 """Enable versioning for this bucket. 

3074 

3075 See https://cloud.google.com/storage/docs/object-versioning for 

3076 details. 

3077 

3078 :type value: convertible to boolean 

3079 :param value: should versioning be enabled for the bucket? 

3080 """ 

3081 self._patch_property("versioning", {"enabled": bool(value)}) 
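# Sketch: enabling object versioning (assumes ``client`` is an authenticated
# ``storage.Client`` and the bucket exists):
#
#   bucket = client.get_bucket("my-bucket")
#   bucket.versioning_enabled = True
#   bucket.patch()
#   # overwritten or deleted objects are now retained as older generations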

3082 

3083 @property 

3084 def requester_pays(self): 

3085 """Does the requester pay for API requests for this bucket? 

3086 

3087 See https://cloud.google.com/storage/docs/requester-pays for 

3088 details. 

3089 

3090 :setter: Update whether requester pays for this bucket. 

3091 :getter: Query whether requester pays for this bucket. 

3092 

3093 :rtype: bool 

3094 :returns: True if requester pays for API requests for the bucket, 

3095 else False. 

3096 """ 

3097 billing = self._properties.get("billing", {}) 

3098 return billing.get("requesterPays", False) 

3099 

3100 @requester_pays.setter 

3101 def requester_pays(self, value): 

3102 """Update whether requester pays for API requests for this bucket. 

3103 

3104 See https://cloud.google.com/storage/docs/using-requester-pays for 

3105 details. 

3106 

3107 :type value: convertible to boolean 

3108 :param value: should requester pay for API requests for the bucket? 

3109 """ 

3110 self._patch_property("billing", {"requesterPays": bool(value)}) 
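# Sketch: enabling requester-pays, then accessing the bucket while billing the
# caller's own project (bucket and project names are hypothetical):
#
#   bucket = client.get_bucket("my-bucket")
#   bucket.requester_pays = True
#   bucket.patch()
#
#   # subsequent callers bill their own project via ``user_project``:
#   paying_bucket = client.bucket("my-bucket", user_project="my-billing-project")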

3111 

3112 @property 

3113 def autoclass_enabled(self): 

3114 """Whether Autoclass is enabled for this bucket. 

3115 

3116 See https://cloud.google.com/storage/docs/using-autoclass for details. 

3117 

3118 :setter: Update whether autoclass is enabled for this bucket. 

3119 :getter: Query whether autoclass is enabled for this bucket. 

3120 

3121 :rtype: bool 

3122 :returns: True if enabled, else False. 

3123 """ 

3124 autoclass = self._properties.get("autoclass", {}) 

3125 return autoclass.get("enabled", False) 

3126 

3127 @autoclass_enabled.setter 

3128 def autoclass_enabled(self, value): 

3129 """Enable or disable Autoclass at the bucket-level. 

3130 

3131 See https://cloud.google.com/storage/docs/using-autoclass for details. 

3132 

3133 :type value: convertible to boolean 

3134 :param value: If true, enable Autoclass for this bucket. 

3135 If false, disable Autoclass for this bucket. 

3136 """ 

3137 autoclass = self._properties.get("autoclass", {}) 

3138 autoclass["enabled"] = bool(value) 

3139 self._patch_property("autoclass", autoclass) 

3140 

3141 @property 

3142 def autoclass_toggle_time(self): 

3143 """Retrieve the toggle time when Autoclaass was last enabled or disabled for the bucket. 

3144 :rtype: datetime.datetime or ``NoneType`` 

3145 :returns: point-in time at which the bucket's autoclass is toggled, or ``None`` if the property is not set locally. 

3146 """ 

3147 autoclass = self._properties.get("autoclass") 

3148 if autoclass is not None: 

3149 timestamp = autoclass.get("toggleTime") 

3150 if timestamp is not None: 

3151 return _rfc3339_nanos_to_datetime(timestamp) 

3152 

3153 @property 

3154 def autoclass_terminal_storage_class(self): 

3155 """The storage class that objects in an Autoclass bucket eventually transition to if 

3156 they are not read for a certain length of time. Valid values are NEARLINE and ARCHIVE. 

3157 

3158 See https://cloud.google.com/storage/docs/using-autoclass for details. 

3159 

3160 :setter: Set the terminal storage class for Autoclass configuration. 

3161 :getter: Get the terminal storage class for Autoclass configuration. 

3162 

3163 :rtype: str or ``NoneType`` 

3164 :returns: The terminal storage class if Autoclass is enabled, else ``None``. 

3165 """ 

3166 autoclass = self._properties.get("autoclass", {}) 

3167 return autoclass.get("terminalStorageClass", None) 

3168 

3169 @autoclass_terminal_storage_class.setter 

3170 def autoclass_terminal_storage_class(self, value): 

3171 """The storage class that objects in an Autoclass bucket eventually transition to if 

3172 they are not read for a certain length of time. Valid values are NEARLINE and ARCHIVE. 

3173 

3174 See https://cloud.google.com/storage/docs/using-autoclass for details. 

3175 

3176 :type value: str 

3177 :param value: The only valid values are `"NEARLINE"` and `"ARCHIVE"`. 

3178 """ 

3179 autoclass = self._properties.get("autoclass", {}) 

3180 autoclass["terminalStorageClass"] = value 

3181 self._patch_property("autoclass", autoclass) 
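# Sketch: enabling Autoclass with a terminal storage class (hypothetical
# bucket name; ``patch()`` sends both staged changes in one request):
#
#   bucket = client.get_bucket("my-bucket")
#   bucket.autoclass_enabled = True
#   bucket.autoclass_terminal_storage_class = "ARCHIVE"
#   bucket.patch()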

3182 

3183 @property 

3184 def autoclass_terminal_storage_class_update_time(self): 

3185 """The time at which the Autoclass terminal_storage_class field was last updated for this bucket 

3186 :rtype: datetime.datetime or ``NoneType`` 

3187 :returns: point-in time at which the bucket's terminal_storage_class is last updated, or ``None`` if the property is not set locally. 

3188 """ 

3189 autoclass = self._properties.get("autoclass") 

3190 if autoclass is not None: 

3191 timestamp = autoclass.get("terminalStorageClassUpdateTime") 

3192 if timestamp is not None: 

3193 return _rfc3339_nanos_to_datetime(timestamp) 

3194 

3195 @property 

3196 def object_retention_mode(self): 

3197 """Retrieve the object retention mode set on the bucket. 

3198 

3199 :rtype: str or ``NoneType`` 

3200 :returns: The object retention mode (``"Enabled"`` means retention 

3201 configurations can be set on objects in the bucket), or ``None`` if not set. 

3202 """ 

3203 object_retention = self._properties.get("objectRetention") 

3204 if object_retention is not None: 

3205 return object_retention.get("mode") 

3206 

3207 @property 

3208 def hierarchical_namespace_enabled(self): 

3209 """Whether hierarchical namespace is enabled for this bucket. 

3210 

3211 :setter: Update whether hierarchical namespace is enabled for this bucket. 

3212 :getter: Query whether hierarchical namespace is enabled for this bucket. 

3213 

3214 :rtype: bool or ``NoneType`` 

3215 :returns: True if enabled, False if disabled, or ``None`` if not set locally. 

3216 """ 

3217 hns = self._properties.get("hierarchicalNamespace", {}) 

3218 return hns.get("enabled") 

3219 

3220 @hierarchical_namespace_enabled.setter 

3221 def hierarchical_namespace_enabled(self, value): 

3222 """Enable or disable hierarchical namespace at the bucket-level. 

3223 

3224 :type value: convertible to boolean 

3225 :param value: If true, enable hierarchical namespace for this bucket. 

3226 If false, disable hierarchical namespace for this bucket. 

3227 

3228 .. note:: 

3229 To enable hierarchical namespace, you must set it at bucket creation time. 

3230 Currently, hierarchical namespace configuration cannot be changed after bucket creation. 

3231 """ 

3232 hns = self._properties.get("hierarchicalNamespace", {}) 

3233 hns["enabled"] = bool(value) 

3234 self._patch_property("hierarchicalNamespace", hns) 
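# Sketch: creating a bucket with hierarchical namespace enabled. The setting
# must be staged before creation, and hierarchical namespace generally
# requires uniform bucket-level access (bucket name is hypothetical):
#
#   bucket = client.bucket("my-new-bucket")
#   bucket.hierarchical_namespace_enabled = True
#   bucket.iam_configuration.uniform_bucket_level_access_enabled = True
#   client.create_bucket(bucket)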

3235 

3236 def configure_website(self, main_page_suffix=None, not_found_page=None): 

3237 """Configure website-related properties. 

3238 

3239 See https://cloud.google.com/storage/docs/static-website 

3240 

3241 .. note:: 

3242 This configures the bucket's website-related properties, controlling how 

3243 the service behaves when accessing bucket contents as a website. 

3244 See [tutorials](https://cloud.google.com/storage/docs/hosting-static-website) and 

3245 [code samples](https://cloud.google.com/storage/docs/samples/storage-define-bucket-website-configuration#storage_define_bucket_website_configuration-python) 

3246 for more information. 

3247 

3248 :type main_page_suffix: str 

3249 :param main_page_suffix: The page to use as the main page 

3250 of a directory. 

3251 Typically something like index.html. 

3252 

3253 :type not_found_page: str 

3254 :param not_found_page: The file to use when a page isn't found. 

3255 """ 

3256 data = {"mainPageSuffix": main_page_suffix, "notFoundPage": not_found_page} 

3257 self._patch_property("website", data) 

3258 

3259 def disable_website(self): 

3260 """Disable the website configuration for this bucket. 

3261 

3262 This is really just a shortcut for setting the website-related 

3263 attributes to ``None``. 

3264 """ 

3265 return self.configure_website(None, None) 
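# Sketch: configuring (and later disabling) static-website behavior; the
# staged properties are only persisted by ``patch()``:
#
#   bucket = client.get_bucket("my-bucket")
#   bucket.configure_website(main_page_suffix="index.html", not_found_page="404.html")
#   bucket.patch()
#
#   bucket.disable_website()  # resets both fields to None
#   bucket.patch()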

3266 

3267 def get_iam_policy( 

3268 self, 

3269 client=None, 

3270 requested_policy_version=None, 

3271 timeout=_DEFAULT_TIMEOUT, 

3272 retry=DEFAULT_RETRY, 

3273 ): 

3274 """Retrieve the IAM policy for the bucket. 

3275 

3276 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy) 

3277 and a [code sample](https://cloud.google.com/storage/docs/samples/storage-view-bucket-iam-members#storage_view_bucket_iam_members-python). 

3278 

3279 If :attr:`user_project` is set, bills the API request to that project. 

3280 

3281 :type client: :class:`~google.cloud.storage.client.Client` or 

3282 ``NoneType`` 

3283 :param client: (Optional) The client to use. If not passed, falls back 

3284 to the ``client`` stored on the current bucket. 

3285 

3286 :type requested_policy_version: int or ``NoneType`` 

3287 :param requested_policy_version: (Optional) The version of IAM policies to request. 

3288 If a policy with a condition is requested without 

3289 setting this, the server will return an error. 

3290 This must be set to a value of 3 to retrieve IAM 

3291 policies containing conditions. This is to prevent 

3292 client code that isn't aware of IAM conditions from 

3293 interpreting and modifying policies incorrectly. 

3294 The service might return a policy with version lower 

3295 than the one that was requested, based on the 

3296 feature syntax in the policy fetched. 

3297 

3298 :type timeout: float or tuple 

3299 :param timeout: 

3300 (Optional) The amount of time, in seconds, to wait 

3301 for the server response. See: :ref:`configuring_timeouts` 

3302 

3303 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

3304 :param retry: 

3305 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

3306 

3307 :rtype: :class:`google.api_core.iam.Policy` 

3308 :returns: the policy instance, based on the resource returned from 

3309 the ``getIamPolicy`` API request. 

3310 """ 

3311 with create_trace_span(name="Storage.Bucket.getIamPolicy"): 

3312 client = self._require_client(client) 

3313 query_params = {} 

3314 

3315 if self.user_project is not None: 

3316 query_params["userProject"] = self.user_project 

3317 

3318 if requested_policy_version is not None: 

3319 query_params["optionsRequestedPolicyVersion"] = requested_policy_version 

3320 

3321 info = client._get_resource( 

3322 f"{self.path}/iam", 

3323 query_params=query_params, 

3324 timeout=timeout, 

3325 retry=retry, 

3326 _target_object=None, 

3327 ) 

3328 return Policy.from_api_repr(info) 

3329 

3330 def set_iam_policy( 

3331 self, 

3332 policy, 

3333 client=None, 

3334 timeout=_DEFAULT_TIMEOUT, 

3335 retry=DEFAULT_RETRY_IF_ETAG_IN_JSON, 

3336 ): 

3337 """Update the IAM policy for the bucket. 

3338 

3339 See 

3340 https://cloud.google.com/storage/docs/json_api/v1/buckets/setIamPolicy 

3341 

3342 If :attr:`user_project` is set, bills the API request to that project. 

3343 

3344 :type policy: :class:`google.api_core.iam.Policy` 

3345 :param policy: policy instance used to update bucket's IAM policy. 

3346 

3347 :type client: :class:`~google.cloud.storage.client.Client` or 

3348 ``NoneType`` 

3349 :param client: (Optional) The client to use. If not passed, falls back 

3350 to the ``client`` stored on the current bucket. 

3351 

3352 :type timeout: float or tuple 

3353 :param timeout: 

3354 (Optional) The amount of time, in seconds, to wait 

3355 for the server response. See: :ref:`configuring_timeouts` 

3356 

3357 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

3358 :param retry: 

3359 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

3360 

3361 :rtype: :class:`google.api_core.iam.Policy` 

3362 :returns: the policy instance, based on the resource returned from 

3363 the ``setIamPolicy`` API request. 

3364 """ 

3365 with create_trace_span(name="Storage.Bucket.setIamPolicy"): 

3366 client = self._require_client(client) 

3367 query_params = {} 

3368 

3369 if self.user_project is not None: 

3370 query_params["userProject"] = self.user_project 

3371 

3372 path = f"{self.path}/iam" 

3373 resource = policy.to_api_repr() 

3374 resource["resourceId"] = self.path 

3375 

3376 info = client._put_resource( 

3377 path, 

3378 resource, 

3379 query_params=query_params, 

3380 timeout=timeout, 

3381 retry=retry, 

3382 _target_object=None, 

3383 ) 

3384 

3385 return Policy.from_api_repr(info) 
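# Sketch: granting a role by fetching, mutating, and re-setting the policy
# (role and member are illustrative; version 3 is requested so policies that
# contain IAM conditions round-trip safely):
#
#   policy = bucket.get_iam_policy(requested_policy_version=3)
#   policy.bindings.append(
#       {"role": "roles/storage.objectViewer", "members": {"group:readers@example.com"}}
#   )
#   bucket.set_iam_policy(policy)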

3386 

3387 def test_iam_permissions( 

3388 self, permissions, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY 

3389 ): 

3390 """API call: test permissions 

3391 

3392 See 

3393 https://cloud.google.com/storage/docs/json_api/v1/buckets/testIamPermissions 

3394 

3395 If :attr:`user_project` is set, bills the API request to that project. 

3396 

3397 :type permissions: list of string 

3398 :param permissions: the permissions to check 

3399 

3400 :type client: :class:`~google.cloud.storage.client.Client` or 

3401 ``NoneType`` 

3402 :param client: (Optional) The client to use. If not passed, falls back 

3403 to the ``client`` stored on the current bucket. 

3404 

3405 :type timeout: float or tuple 

3406 :param timeout: 

3407 (Optional) The amount of time, in seconds, to wait 

3408 for the server response. See: :ref:`configuring_timeouts` 

3409 

3410 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

3411 :param retry: 

3412 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

3413 

3414 :rtype: list of string 

3415 :returns: the permissions returned by the ``testIamPermissions`` API 

3416 request. 

3417 """ 

3418 with create_trace_span(name="Storage.Bucket.testIamPermissions"): 

3419 client = self._require_client(client) 

3420 query_params = {"permissions": permissions} 

3421 

3422 if self.user_project is not None: 

3423 query_params["userProject"] = self.user_project 

3424 

3425 path = f"{self.path}/iam/testPermissions" 

3426 resp = client._get_resource( 

3427 path, 

3428 query_params=query_params, 

3429 timeout=timeout, 

3430 retry=retry, 

3431 _target_object=None, 

3432 ) 

3433 return resp.get("permissions", []) 
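# Sketch: checking which of a set of permissions the caller holds; the
# response is the subset of the requested permissions that is granted:
#
#   granted = bucket.test_iam_permissions(
#       ["storage.objects.get", "storage.objects.create"]
#   )
#   can_upload = "storage.objects.create" in granted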

3434 

3435 def make_public( 

3436 self, 

3437 recursive=False, 

3438 future=False, 

3439 client=None, 

3440 timeout=_DEFAULT_TIMEOUT, 

3441 if_metageneration_match=None, 

3442 if_metageneration_not_match=None, 

3443 retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED, 

3444 ): 

3445 """Update bucket's ACL, granting read access to anonymous users. 

3446 

3447 :type recursive: bool 

3448 :param recursive: If True, this will make all blobs inside the bucket 

3449 public as well. 

3450 

3451 :type future: bool 

3452 :param future: If True, this will make all objects created in the 

3453 future public as well. 

3454 

3455 :type client: :class:`~google.cloud.storage.client.Client` or 

3456 ``NoneType`` 

3457 :param client: (Optional) The client to use. If not passed, falls back 

3458 to the ``client`` stored on the current bucket. 

3459 :type timeout: float or tuple 

3460 :param timeout: 

3461 (Optional) The amount of time, in seconds, to wait 

3462 for the server response. See: :ref:`configuring_timeouts` 

3463 

3464 :type if_metageneration_match: long 

3465 :param if_metageneration_match: (Optional) Make the operation conditional on whether the 

3466 bucket's current metageneration matches the given value. 

3467 

3468 :type if_metageneration_not_match: long 

3469 :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the 

3470 bucket's current metageneration does not match the given value. 

3471 

3472 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

3473 :param retry: 

3474 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

3475 

3476 :raises ValueError: 

3477 If ``recursive`` is True, and the bucket contains more than 256 

3478 blobs. This is to prevent extremely long runtime of this 

3479 method. For such buckets, iterate over the blobs returned by 

3480 :meth:`list_blobs` and call 

3481 :meth:`~google.cloud.storage.blob.Blob.make_public` 

3482 for each blob. 

3483 """ 

3484 with create_trace_span(name="Storage.Bucket.makePublic"): 

3485 self.acl.all().grant_read() 

3486 self.acl.save( 

3487 client=client, 

3488 timeout=timeout, 

3489 if_metageneration_match=if_metageneration_match, 

3490 if_metageneration_not_match=if_metageneration_not_match, 

3491 retry=retry, 

3492 ) 

3493 

3494 if future: 

3495 doa = self.default_object_acl 

3496 if not doa.loaded: 

3497 doa.reload(client=client, timeout=timeout) 

3498 doa.all().grant_read() 

3499 doa.save( 

3500 client=client, 

3501 timeout=timeout, 

3502 if_metageneration_match=if_metageneration_match, 

3503 if_metageneration_not_match=if_metageneration_not_match, 

3504 retry=retry, 

3505 ) 

3506 

3507 if recursive: 

3508 blobs = list( 

3509 self.list_blobs( 

3510 projection="full", 

3511 max_results=self._MAX_OBJECTS_FOR_ITERATION + 1, 

3512 client=client, 

3513 timeout=timeout, 

3514 ) 

3515 ) 

3516 if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION: 

3517 message = ( 

3518 "Refusing to make public recursively with more than " 

3519 "%d objects. If you actually want to make every object " 

3520 "in this bucket public, iterate through the blobs " 

3521 "returned by 'Bucket.list_blobs()' and call " 

3522 "'make_public' on each one." 

3523 ) % (self._MAX_OBJECTS_FOR_ITERATION,) 

3524 raise ValueError(message) 

3525 

3526 for blob in blobs: 

3527 blob.acl.all().grant_read() 

3528 blob.acl.save( 

3529 client=client, 

3530 timeout=timeout, 

3531 ) 
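# Sketch: making the bucket, its existing objects, and objects created in the
# future readable by anonymous users (raises ValueError if the bucket holds
# more than ``_MAX_OBJECTS_FOR_ITERATION`` objects; see the docstring above):
#
#   bucket.make_public(recursive=True, future=True)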

3532 

3533 def make_private( 

3534 self, 

3535 recursive=False, 

3536 future=False, 

3537 client=None, 

3538 timeout=_DEFAULT_TIMEOUT, 

3539 if_metageneration_match=None, 

3540 if_metageneration_not_match=None, 

3541 retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED, 

3542 ): 

3543 """Update bucket's ACL, revoking read access for anonymous users. 

3544 

3545 :type recursive: bool 

3546 :param recursive: If True, this will make all blobs inside the bucket 

3547 private as well. 

3548 

3549 :type future: bool 

3550 :param future: If True, this will make all objects created in the 

3551 future private as well. 

3552 

3553 :type client: :class:`~google.cloud.storage.client.Client` or 

3554 ``NoneType`` 

3555 :param client: (Optional) The client to use. If not passed, falls back 

3556 to the ``client`` stored on the current bucket. 

3557 

3558 :type timeout: float or tuple 

3559 :param timeout: 

3560 (Optional) The amount of time, in seconds, to wait 

3561 for the server response. See: :ref:`configuring_timeouts` 

3562 

3563 :type if_metageneration_match: long 

3564 :param if_metageneration_match: (Optional) Make the operation conditional on whether the 

3565 bucket's current metageneration matches the given value. 

3566 :type if_metageneration_not_match: long 

3567 :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the 

3568 bucket's current metageneration does not match the given value. 

3569 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

3570 :param retry: 

3571 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

3572 

3573 :raises ValueError: 

3574 If ``recursive`` is True, and the bucket contains more than 256 

3575 blobs. This is to prevent extremely long runtime of this 

3576 method. For such buckets, iterate over the blobs returned by 

3577 :meth:`list_blobs` and call 

3578 :meth:`~google.cloud.storage.blob.Blob.make_private` 

3579 for each blob. 

3580 """ 

3581 with create_trace_span(name="Storage.Bucket.makePrivate"): 

3582 self.acl.all().revoke_read() 

3583 self.acl.save( 

3584 client=client, 

3585 timeout=timeout, 

3586 if_metageneration_match=if_metageneration_match, 

3587 if_metageneration_not_match=if_metageneration_not_match, 

3588 retry=retry, 

3589 ) 

3590 

3591 if future: 

3592 doa = self.default_object_acl 

3593 if not doa.loaded: 

3594 doa.reload(client=client, timeout=timeout) 

3595 doa.all().revoke_read() 

3596 doa.save( 

3597 client=client, 

3598 timeout=timeout, 

3599 if_metageneration_match=if_metageneration_match, 

3600 if_metageneration_not_match=if_metageneration_not_match, 

3601 retry=retry, 

3602 ) 

3603 

3604 if recursive: 

3605 blobs = list( 

3606 self.list_blobs( 

3607 projection="full", 

3608 max_results=self._MAX_OBJECTS_FOR_ITERATION + 1, 

3609 client=client, 

3610 timeout=timeout, 

3611 ) 

3612 ) 

3613 if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION: 

3614 message = ( 

3615 "Refusing to make private recursively with more than " 

3616 "%d objects. If you actually want to make every object " 

3617 "in this bucket private, iterate through the blobs " 

3618 "returned by 'Bucket.list_blobs()' and call " 

3619 "'make_private' on each one." 

3620 ) % (self._MAX_OBJECTS_FOR_ITERATION,) 

3621 raise ValueError(message) 

3622 

3623 for blob in blobs: 

3624 blob.acl.all().revoke_read() 

3625 blob.acl.save(client=client, timeout=timeout) 

3626 

3627 def generate_upload_policy(self, conditions, expiration=None, client=None): 

3628 """Create a signed upload policy for uploading objects. 

3629 

3630 This method generates and signs a policy document. You can use 

3631 [`policy documents`](https://cloud.google.com/storage/docs/xml-api/post-object-forms) 

3632 to allow visitors to a website to upload files to 

3633 Google Cloud Storage without giving them direct write access. 

3634 See a [code sample](https://cloud.google.com/storage/docs/xml-api/post-object-forms#python). 

3635 

3636 :type expiration: datetime 

3637 :param expiration: (Optional) Expiration in UTC. If not specified, the 

3638 policy will expire in 1 hour. 

3639 

3640 :type conditions: list 

3641 :param conditions: A list of conditions as described in the 

3642 `policy documents` documentation. 

3643 

3644 :type client: :class:`~google.cloud.storage.client.Client` 

3645 :param client: (Optional) The client to use. If not passed, falls back 

3646 to the ``client`` stored on the current bucket. 

3647 

3648 :rtype: dict 

3649 :returns: A dictionary of (form field name, form field value) of form 

3650 fields that should be added to your HTML upload form in order 

3651 to attach the signature. 

3652 """ 

3653 client = self._require_client(client) 

3654 credentials = client._credentials 

3655 _signing.ensure_signed_credentials(credentials) 

3656 

3657 if expiration is None: 

3658 expiration = _NOW(_UTC).replace(tzinfo=None) + datetime.timedelta(hours=1) 

3659 

3660 conditions = conditions + [{"bucket": self.name}] 

3661 

3662 policy_document = { 

3663 "expiration": _datetime_to_rfc3339(expiration), 

3664 "conditions": conditions, 

3665 } 

3666 

3667 encoded_policy_document = base64.b64encode( 

3668 json.dumps(policy_document).encode("utf-8") 

3669 ) 

3670 signature = base64.b64encode(credentials.sign_bytes(encoded_policy_document)) 

3671 

3672 fields = { 

3673 "bucket": self.name, 

3674 "GoogleAccessId": credentials.signer_email, 

3675 "policy": encoded_policy_document.decode("utf-8"), 

3676 "signature": signature.decode("utf-8"), 

3677 } 

3678 

3679 return fields 
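# Sketch: producing form fields for a browser POST upload. The conditions and
# expiration are illustrative, and the client must hold signing-capable
# (e.g. service-account) credentials:
#
#   import datetime
#
#   fields = bucket.generate_upload_policy(
#       conditions=[["starts-with", "$key", "uploads/"]],
#       expiration=datetime.datetime.utcnow() + datetime.timedelta(hours=1),
#   )
#   # embed ``fields`` as hidden inputs in an HTML form that POSTs to the bucket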

3680 

3681 def lock_retention_policy( 

3682 self, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY 

3683 ): 

3684 """Lock the bucket's retention policy. 

3685 

3686 :type client: :class:`~google.cloud.storage.client.Client` or 

3687 ``NoneType`` 

3688 :param client: (Optional) The client to use. If not passed, falls back 

3689 to the ``client`` stored on the current bucket. 

3690 

3691 :type timeout: float or tuple 

3692 :param timeout: 

3693 (Optional) The amount of time, in seconds, to wait 

3694 for the server response. See: :ref:`configuring_timeouts` 

3695 

3696 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

3697 :param retry: 

3698 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

3699 

3700 :raises ValueError: 

3701 if the bucket has no metageneration (i.e., new or never reloaded); 

3702 if the bucket has no retention policy assigned; 

3703 if the bucket's retention policy is already locked. 

3704 """ 

3705 with create_trace_span(name="Storage.Bucket.lockRetentionPolicy"): 

3706 if "metageneration" not in self._properties: 

3707 raise ValueError( 

3708 "Bucket has no retention policy assigned: try 'reload'?" 

3709 ) 

3710 

3711 policy = self._properties.get("retentionPolicy") 

3712 

3713 if policy is None: 

3714 raise ValueError( 

3715 "Bucket has no retention policy assigned: try 'reload'?" 

3716 ) 

3717 

3718 if policy.get("isLocked"): 

3719 raise ValueError("Bucket's retention policy is already locked.") 

3720 

3721 client = self._require_client(client) 

3722 

3723 query_params = {"ifMetagenerationMatch": self.metageneration} 

3724 

3725 if self.user_project is not None: 

3726 query_params["userProject"] = self.user_project 

3727 

3728 path = f"/b/{self.name}/lockRetentionPolicy" 

3729 api_response = client._post_resource( 

3730 path, 

3731 None, 

3732 query_params=query_params, 

3733 timeout=timeout, 

3734 retry=retry, 

3735 _target_object=self, 

3736 ) 

3737 self._set_properties(api_response) 
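# Sketch: locking a retention policy. The bucket must have a policy and a
# known metageneration (``get_bucket``/``reload`` loads it), and locking is
# irreversible:
#
#   bucket = client.get_bucket("my-bucket")
#   bucket.retention_period = 7 * 24 * 60 * 60
#   bucket.patch()
#   bucket.lock_retention_policy()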

3738 

3739 def generate_signed_url( 

3740 self, 

3741 expiration=None, 

3742 api_access_endpoint=None, 

3743 method="GET", 

3744 headers=None, 

3745 query_parameters=None, 

3746 client=None, 

3747 credentials=None, 

3748 version=None, 

3749 virtual_hosted_style=False, 

3750 bucket_bound_hostname=None, 

3751 scheme="http", 

3752 ): 

3753 """Generates a signed URL for this bucket. 

3754 

3755 .. note:: 

3756 

3757 If you are on Google Compute Engine, you can't generate a signed 

3758 URL using a GCE service account. If you'd like to be able to generate 

3759 a signed URL from GCE, you can use a standard service account from a 

3760 JSON file rather than a GCE service account. 

3761 

3762 If you have a bucket that you want to allow access to for a set 

3763 amount of time, you can use this method to generate a URL that 

3764 is only valid within a certain time period. 

3765 

3766 If ``bucket_bound_hostname`` is set instead of ``api_access_endpoint``, 

3767 ``https`` works only if using a CDN. 

3768 

3769 :type expiration: Union[Integer, datetime.datetime, datetime.timedelta] 

3770 :param expiration: Point in time when the signed URL should expire. If 

3771 a ``datetime`` instance is passed without an explicit 

3772 ``tzinfo`` set, it will be assumed to be ``UTC``. 

3773 

3774 :type api_access_endpoint: str 

3775 :param api_access_endpoint: (Optional) URI base, for instance 

3776 "https://storage.googleapis.com". If not specified, the client's 

3777 api_endpoint will be used. Incompatible with bucket_bound_hostname. 

3778 

3779 :type method: str 

3780 :param method: The HTTP verb that will be used when requesting the URL. 

3781 

3782 :type headers: dict 

3783 :param headers: 

3784 (Optional) Additional HTTP headers to be included as part of the 

3785 signed URLs. See: 

3786 https://cloud.google.com/storage/docs/xml-api/reference-headers 

3787 Requests using the signed URL *must* pass the specified header 

3788 (name and value) with each request for the URL. 

3789 

3790 :type query_parameters: dict 

3791 :param query_parameters: 

3792 (Optional) Additional query parameters to be included as part of the 

3793 signed URLs. See: 

3794 https://cloud.google.com/storage/docs/xml-api/reference-headers#query 

3795 

3796 :type client: :class:`~google.cloud.storage.client.Client` or 

3797 ``NoneType`` 

3798 :param client: (Optional) The client to use. If not passed, falls back 

3799 to the ``client`` stored on the current bucket. 

3800 

3801 :type credentials: :class:`google.auth.credentials.Credentials` or 

3802 :class:`NoneType` 

3803 :param credentials: The authorization credentials to attach to requests. 

3804 These credentials identify this application to the service. 

3805 If none are specified, the client will attempt to ascertain 

3806 the credentials from the environment. 

3807 

3808 :type version: str 

3809 :param version: (Optional) The version of signed credential to create. 

3810 Must be one of 'v2' | 'v4'. 

3811 

3812 :type virtual_hosted_style: bool 

3813 :param virtual_hosted_style: 

3814 (Optional) If true, then construct the URL relative the bucket's 

3815 virtual hostname, e.g., '<bucket-name>.storage.googleapis.com'. 

3816 Incompatible with bucket_bound_hostname. 

3817 

3818 :type bucket_bound_hostname: str 

3819 :param bucket_bound_hostname: 

3820 (Optional) If passed, then construct the URL relative to the bucket-bound hostname. 

3821 Value can be a bare hostname or include a scheme, e.g., 'example.com' or 'http://example.com'. 

3822 Incompatible with api_access_endpoint and virtual_hosted_style. 

3823 See: https://cloud.google.com/storage/docs/request-endpoints#cname 

3824 

3825 :type scheme: str 

3826 :param scheme: 

3827 (Optional) If ``bucket_bound_hostname`` is passed as a bare hostname, use 

3828 this value as the scheme. ``https`` will work only when using a CDN. 

3829 Defaults to ``"http"``. 

3830 

3831 :raises: :exc:`ValueError` when version is invalid or mutually exclusive arguments are used. 

3832 :raises: :exc:`TypeError` when expiration is not a valid type. 

3833 :raises: :exc:`AttributeError` if credentials is not an instance 

3834 of :class:`google.auth.credentials.Signing`. 

3835 

3836 :rtype: str 

3837 :returns: A signed URL you can use to access the resource 

3838 until expiration. 

3839 """ 

3840 if version is None: 

3841 version = "v2" 

3842 elif version not in ("v2", "v4"): 

3843 raise ValueError("'version' must be either 'v2' or 'v4'") 

3844 

3845 if ( 

3846 api_access_endpoint is not None or virtual_hosted_style 

3847 ) and bucket_bound_hostname: 

3848 raise ValueError( 

3849 "The bucket_bound_hostname argument is not compatible with " 

3850 "either api_access_endpoint or virtual_hosted_style." 

3851 ) 

3852 

3853 if api_access_endpoint is None: 

3854 client = self._require_client(client) 

3855 api_access_endpoint = client.api_endpoint 

3856 

3857 # If you are on Google Compute Engine, you can't generate a signed URL 

3858 # using a GCE service account. 

3859 # See https://github.com/googleapis/google-auth-library-python/issues/50 

3860 if virtual_hosted_style: 

3861 api_access_endpoint = _virtual_hosted_style_base_url( 

3862 api_access_endpoint, self.name 

3863 ) 

3864 resource = "/" 

3865 elif bucket_bound_hostname: 

3866 api_access_endpoint = _bucket_bound_hostname_url( 

3867 bucket_bound_hostname, scheme 

3868 ) 

3869 resource = "/" 

3870 else: 

3871 resource = f"/{self.name}" 

3872 

3873 if credentials is None: 

3874 client = self._require_client(client) # May be redundant, but that's ok. 

3875 credentials = client._credentials 

3876 

3877 if version == "v2": 

3878 helper = generate_signed_url_v2 

3879 else: 

3880 helper = generate_signed_url_v4 

3881 

3882 return helper( 

3883 credentials, 

3884 resource=resource, 

3885 expiration=expiration, 

3886 api_access_endpoint=api_access_endpoint, 

3887 method=method.upper(), 

3888 headers=headers, 

3889 query_parameters=query_parameters, 

3890 ) 
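# Sketch: a v4 signed URL for the bucket resource, valid for 15 minutes
# (requires signing-capable credentials, e.g. a service-account key):
#
#   import datetime
#
#   url = bucket.generate_signed_url(
#       version="v4",
#       expiration=datetime.timedelta(minutes=15),
#       method="GET",
#   )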

3891 

3892 @property 

3893 def ip_filter(self): 

3894 """Retrieve or set the IP Filter configuration for this bucket. 

3895 

3896 See https://cloud.google.com/storage/docs/ip-filtering-overview and 

3897 https://cloud.google.com/storage/docs/json_api/v1/buckets#ipFilter 

3898 

3899 .. note:: 

3900 The getter for this property returns an 

3901 :class:`~google.cloud.storage.ip_filter.IPFilter` object, which is a 

3902 structured representation of the bucket's IP filter configuration. 

3903 Modifying the returned object has no effect. To update the bucket's 

3904 IP filter, create and assign a new ``IPFilter`` object to this 

3905 property and then call 

3906 :meth:`~google.cloud.storage.bucket.Bucket.patch`. 

3907 

3908 .. code-block:: python 

3909 

3910 from google.cloud.storage.ip_filter import ( 

3911 IPFilter, 

3912 PublicNetworkSource, 

3913 ) 

3914 

3915 ip_filter = IPFilter() 

3916 ip_filter.mode = "Enabled" 

3917 ip_filter.public_network_source = PublicNetworkSource( 

3918 allowed_ip_cidr_ranges=["203.0.113.5/32"] 

3919 ) 

3920 bucket.ip_filter = ip_filter 

3921 bucket.patch() 

3922 

3923 :setter: Set the IP Filter configuration for this bucket. 

3924 :getter: Gets the IP Filter configuration for this bucket. 

3925 

3926 :rtype: :class:`~google.cloud.storage.ip_filter.IPFilter` or ``NoneType`` 

3927 :returns: 

3928 An ``IPFilter`` object representing the configuration, or ``None`` 

3929 if no filter is configured. 

3930 """ 

3931 resource = self._properties.get(_IP_FILTER_PROPERTY) 

3932 if resource: 

3933 return IPFilter._from_api_resource(resource) 

3934 return None 

3935 

3936 @ip_filter.setter 

3937 def ip_filter(self, value): 
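"""Set the IP Filter configuration for this bucket.

Accepts an :class:`~google.cloud.storage.ip_filter.IPFilter` instance, a raw
resource mapping, or ``None`` to clear the filter; call
:meth:`~google.cloud.storage.bucket.Bucket.patch` to persist the change.
"""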

3938 if value is None: 

3939 self._patch_property(_IP_FILTER_PROPERTY, None) 

3940 elif isinstance(value, IPFilter): 

3941 self._patch_property(_IP_FILTER_PROPERTY, value._to_api_resource()) 

3942 else: 

3943 self._patch_property(_IP_FILTER_PROPERTY, value) 

3944 

3945 

3946class SoftDeletePolicy(dict): 

3947 """Map a bucket's soft delete policy. 

3948 

3949 See https://cloud.google.com/storage/docs/soft-delete 

3950 

3951 :type bucket: :class:`Bucket` 

3952 :param bucket: Bucket for which this instance is the policy. 

3953 

3954 :type retention_duration_seconds: int 

3955 :param retention_duration_seconds: 

3956 (Optional) The period of time in seconds that soft-deleted objects in the bucket 

3957 will be retained and cannot be permanently deleted. 

3958 

3959 :type effective_time: :class:`datetime.datetime` 

3960 :param effective_time: 

3961 (Optional) When the bucket's soft delete policy is effective. 

3962 This value should normally only be set by the back-end API. 

3963 """ 

3964 

3965 def __init__(self, bucket, **kw): 

3966 data = {} 

3967 retention_duration_seconds = kw.get("retention_duration_seconds") 

3968 data["retentionDurationSeconds"] = retention_duration_seconds 

3969 

3970 effective_time = kw.get("effective_time") 

3971 if effective_time is not None: 

3972 effective_time = _datetime_to_rfc3339(effective_time) 

3973 data["effectiveTime"] = effective_time 

3974 

3975 super().__init__(data) 

3976 self._bucket = bucket 

3977 

3978 @classmethod 

3979 def from_api_repr(cls, resource, bucket): 

3980 """Factory: construct instance from resource. 

3981 

3982 :type resource: dict 

3983 :param resource: mapping as returned from API call. 

3984 

3985 :type bucket: :class:`Bucket` 

3986 :param bucket: Bucket for which this instance is the policy. 

3987 

3988 :rtype: :class:`SoftDeletePolicy` 

3989 :returns: Instance created from resource. 

3990 """ 

3991 instance = cls(bucket) 

3992 instance.update(resource) 

3993 return instance 

3994 

3995 @property 

3996 def bucket(self): 

3997 """Bucket for which this instance is the policy. 

3998 

3999 :rtype: :class:`Bucket` 

4000 :returns: the instance's bucket. 

4001 """ 

4002 return self._bucket 

4003 

4004 @property 

4005 def retention_duration_seconds(self): 

4006 """Get the retention duration of the bucket's soft delete policy. 

4007 

4008 :rtype: int or ``NoneType`` 

4009 :returns: The period of time in seconds that soft-deleted objects in the bucket 

4010 will be retained and cannot be permanently deleted; Or ``None`` if the 

4011 property is not set. 

4012 """ 

4013 duration = self.get("retentionDurationSeconds") 

4014 if duration is not None: 

4015 return int(duration) 

4016 

4017 @retention_duration_seconds.setter 

4018 def retention_duration_seconds(self, value): 

4019 """Set the retention duration of the bucket's soft delete policy. 

4020 

4021 :type value: int 

4022 :param value: 

4023 The period of time in seconds that soft-deleted objects in the bucket 

4024 will be retained and cannot be permanently deleted. 

4025 """ 

4026 self["retentionDurationSeconds"] = value 

4027 self.bucket._patch_property("softDeletePolicy", self) 
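# Sketch: updating a bucket's soft-delete retention, assuming the bucket
# exposes this policy via a ``soft_delete_policy`` accessor defined elsewhere
# in this module (the setter stages the change; ``patch()`` persists it):
#
#   bucket = client.get_bucket("my-bucket")
#   bucket.soft_delete_policy.retention_duration_seconds = 10 * 24 * 60 * 60
#   bucket.patch()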

4028 

4029 @property 

4030 def effective_time(self): 

4031 """Get the effective time of the bucket's soft delete policy. 

4032 

4033 :rtype: datetime.datetime or ``NoneType`` 

4034 :returns: point in time at which the bucket's soft delete policy is 

4035 effective, or ``None`` if the property is not set. 

4036 """ 

4037 timestamp = self.get("effectiveTime") 

4038 if timestamp is not None: 

4039 return _rfc3339_nanos_to_datetime(timestamp) 

4040 

4041 

4042def _raise_if_len_differs(expected_len, **generation_match_args): 

4043 """ 

4044 Raise an error if any generation match argument 

4045 is set and its length differs from the given value. 

4046 

4047 :type expected_len: int 

4048 :param expected_len: Expected argument length in case it's set. 

4049 

4050 :type generation_match_args: dict 

4051 :param generation_match_args: Lists whose lengths must be checked. 

4052 

4053 :raises: :exc:`ValueError` if any argument is set but has an unexpected length. 

4054 """ 

4055 for name, value in generation_match_args.items(): 

4056 if value is not None and len(value) != expected_len: 

4057 raise ValueError(f"'{name}' length must be the same as 'blobs' length")