Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/google/cloud/storage/bucket.py: 36%

674 statements  

« prev     ^ index     » next       coverage.py v7.2.7, created at 2023-06-07 07:13 +0000

1# Copyright 2014 Google LLC 

2# 

3# Licensed under the Apache License, Version 2.0 (the "License"); 

4# you may not use this file except in compliance with the License. 

5# You may obtain a copy of the License at 

6# 

7# http://www.apache.org/licenses/LICENSE-2.0 

8# 

9# Unless required by applicable law or agreed to in writing, software 

10# distributed under the License is distributed on an "AS IS" BASIS, 

11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 

12# See the License for the specific language governing permissions and 

13# limitations under the License. 

14 

15"""Create / interact with Google Cloud Storage buckets.""" 

16 

17import base64 

18import copy 

19import datetime 

20import json 

21from urllib.parse import urlsplit 

22import warnings 

23 

24from google.api_core import datetime_helpers 

25from google.cloud._helpers import _datetime_to_rfc3339 

26from google.cloud._helpers import _NOW 

27from google.cloud._helpers import _rfc3339_nanos_to_datetime 

28from google.cloud.exceptions import NotFound 

29from google.api_core.iam import Policy 

30from google.cloud.storage import _signing 

31from google.cloud.storage._helpers import _add_etag_match_headers 

32from google.cloud.storage._helpers import _add_generation_match_parameters 

33from google.cloud.storage._helpers import _PropertyMixin 

34from google.cloud.storage._helpers import _scalar_property 

35from google.cloud.storage._helpers import _validate_name 

36from google.cloud.storage._signing import generate_signed_url_v2 

37from google.cloud.storage._signing import generate_signed_url_v4 

38from google.cloud.storage._helpers import _bucket_bound_hostname_url 

39from google.cloud.storage.acl import BucketACL 

40from google.cloud.storage.acl import DefaultObjectACL 

41from google.cloud.storage.blob import Blob 

42from google.cloud.storage.constants import _DEFAULT_TIMEOUT 

43from google.cloud.storage.constants import ARCHIVE_STORAGE_CLASS 

44from google.cloud.storage.constants import COLDLINE_STORAGE_CLASS 

45from google.cloud.storage.constants import DUAL_REGION_LOCATION_TYPE 

46from google.cloud.storage.constants import ( 

47 DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS, 

48) 

49from google.cloud.storage.constants import MULTI_REGIONAL_LEGACY_STORAGE_CLASS 

50from google.cloud.storage.constants import MULTI_REGION_LOCATION_TYPE 

51from google.cloud.storage.constants import NEARLINE_STORAGE_CLASS 

52from google.cloud.storage.constants import PUBLIC_ACCESS_PREVENTION_INHERITED 

53from google.cloud.storage.constants import REGIONAL_LEGACY_STORAGE_CLASS 

54from google.cloud.storage.constants import REGION_LOCATION_TYPE 

55from google.cloud.storage.constants import STANDARD_STORAGE_CLASS 

56from google.cloud.storage.notification import BucketNotification 

57from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT 

58from google.cloud.storage.retry import DEFAULT_RETRY 

59from google.cloud.storage.retry import DEFAULT_RETRY_IF_GENERATION_SPECIFIED 

60from google.cloud.storage.retry import DEFAULT_RETRY_IF_ETAG_IN_JSON 

61from google.cloud.storage.retry import DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED 

62 

63 

# Error / deprecation messages shared by IAMConfiguration and Bucket.
_UBLA_BPO_ENABLED_MESSAGE = (
    "Pass only one of 'uniform_bucket_level_access_enabled' / "
    "'bucket_policy_only_enabled' to 'IAMConfiguration'."
)
_BPO_ENABLED_MESSAGE = (
    "'IAMConfiguration.bucket_policy_only_enabled' is deprecated. "
    "Instead, use 'IAMConfiguration.uniform_bucket_level_access_enabled'."
)
_UBLA_BPO_LOCK_TIME_MESSAGE = (
    "Pass only one of 'uniform_bucket_level_access_lock_time' / "
    "'bucket_policy_only_lock_time' to 'IAMConfiguration'."
)
_BPO_LOCK_TIME_MESSAGE = (
    "'IAMConfiguration.bucket_policy_only_lock_time' is deprecated. "
    "Instead, use 'IAMConfiguration.uniform_bucket_level_access_lock_time'."
)
_LOCATION_SETTER_MESSAGE = (
    "Assignment to 'Bucket.location' is deprecated, as it is only "
    "valid before the bucket is created. Instead, pass the location "
    "to `Bucket.create`."
)
# Default public Google Cloud Storage API endpoint.
_API_ACCESS_ENDPOINT = "https://storage.googleapis.com"

86 

87 

88def _blobs_page_start(iterator, page, response): 

89 """Grab prefixes after a :class:`~google.cloud.iterator.Page` started. 

90 

91 :type iterator: :class:`~google.api_core.page_iterator.Iterator` 

92 :param iterator: The iterator that is currently in use. 

93 

94 :type page: :class:`~google.cloud.api.core.page_iterator.Page` 

95 :param page: The page that was just created. 

96 

97 :type response: dict 

98 :param response: The JSON API response for a page of blobs. 

99 """ 

100 page.prefixes = tuple(response.get("prefixes", ())) 

101 iterator.prefixes.update(page.prefixes) 

102 

103 

def _item_to_blob(iterator, item):
    """Convert a JSON blob resource to the native object.

    .. note::

       This assumes that the ``bucket`` attribute has been
       added to the iterator after being created.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that has retrieved the item.

    :type item: dict
    :param item: An item to be converted to a blob.

    :rtype: :class:`.Blob`
    :returns: The next blob in the page.
    """
    blob = Blob(item.get("name"), bucket=iterator.bucket)
    blob._set_properties(item)
    return blob

125 

126 

def _item_to_notification(iterator, item):
    """Convert a JSON notification resource to the native object.

    .. note::

       This assumes that the ``bucket`` attribute has been
       added to the iterator after being created.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that has retrieved the item.

    :type item: dict
    :param item: An item to be converted to a notification.

    :rtype: :class:`.BucketNotification`
    :returns: The next notification being iterated.
    """
    bucket = iterator.bucket
    return BucketNotification.from_api_repr(item, bucket=bucket)

145 

146 

class LifecycleRuleConditions(dict):
    """Map a single lifecycle rule for a bucket.

    See: https://cloud.google.com/storage/docs/lifecycle

    :type age: int
    :param age: (Optional) Apply rule action to items whose age, in days,
                exceeds this value.

    :type created_before: datetime.date
    :param created_before: (Optional) Apply rule action to items created
                           before this date.

    :type is_live: bool
    :param is_live: (Optional) If true, apply rule action to non-versioned
                    items, or to items with no newer versions. If false, apply
                    rule action to versioned items with at least one newer
                    version.

    :type matches_prefix: list(str)
    :param matches_prefix: (Optional) Apply rule action to items which
                           any prefix matches the beginning of the item name.

    :type matches_storage_class: list(str), one or more of
                                 :attr:`Bucket.STORAGE_CLASSES`.
    :param matches_storage_class: (Optional) Apply rule action to items
                                  whose storage class matches this value.

    :type matches_suffix: list(str)
    :param matches_suffix: (Optional) Apply rule action to items which
                           any suffix matches the end of the item name.

    :type number_of_newer_versions: int
    :param number_of_newer_versions: (Optional) Apply rule action to versioned
                                     items having N newer versions.

    :type days_since_custom_time: int
    :param days_since_custom_time: (Optional) Apply rule action to items whose number of days
                                   elapsed since the custom timestamp. This condition is relevant
                                   only for versioned objects. The value of the field must be a non
                                   negative integer. If it's zero, the object version will become
                                   eligible for lifecycle action as soon as it becomes custom.

    :type custom_time_before: :class:`datetime.date`
    :param custom_time_before: (Optional) Date object parsed from RFC3339 valid date, apply rule action
                               to items whose custom time is before this date. This condition is relevant
                               only for versioned objects, e.g., 2019-03-16.

    :type days_since_noncurrent_time: int
    :param days_since_noncurrent_time: (Optional) Apply rule action to items whose number of days
                                       elapsed since the non current timestamp. This condition
                                       is relevant only for versioned objects. The value of the field
                                       must be a non negative integer. If it's zero, the object version
                                       will become eligible for lifecycle action as soon as it becomes
                                       non current.

    :type noncurrent_time_before: :class:`datetime.date`
    :param noncurrent_time_before: (Optional) Date object parsed from RFC3339 valid date, apply
                                   rule action to items whose non current time is before this date.
                                   This condition is relevant only for versioned objects, e.g, 2019-03-16.

    :raises ValueError: if no arguments are passed.
    """

    def __init__(
        self,
        age=None,
        created_before=None,
        is_live=None,
        matches_storage_class=None,
        number_of_newer_versions=None,
        days_since_custom_time=None,
        custom_time_before=None,
        days_since_noncurrent_time=None,
        noncurrent_time_before=None,
        matches_prefix=None,
        matches_suffix=None,
        _factory=False,
    ):
        # (API key, value, serialize-as-ISO-date) triples, in the order the
        # original implementation inserted them.
        entries = (
            ("age", age, False),
            ("createdBefore", created_before, True),
            ("isLive", is_live, False),
            ("matchesStorageClass", matches_storage_class, False),
            ("numNewerVersions", number_of_newer_versions, False),
            ("daysSinceCustomTime", days_since_custom_time, False),
            ("customTimeBefore", custom_time_before, True),
            ("daysSinceNoncurrentTime", days_since_noncurrent_time, False),
            ("noncurrentTimeBefore", noncurrent_time_before, True),
            ("matchesPrefix", matches_prefix, False),
            ("matchesSuffix", matches_suffix, False),
        )
        conditions = {
            key: (value.isoformat() if as_date else value)
            for key, value, as_date in entries
            if value is not None
        }

        # `_factory=True` is used by `from_api_repr` to build an empty
        # instance that is then populated from the API resource.
        if not _factory and not conditions:
            raise ValueError("Supply at least one condition")

        super(LifecycleRuleConditions, self).__init__(conditions)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleConditions`
        :returns: Instance created from resource.
        """
        instance = cls(_factory=True)
        instance.update(resource)
        return instance

    @property
    def age(self):
        """Condition's age value."""
        return self.get("age")

    @property
    def created_before(self):
        """Condition's created_before value."""
        before = self.get("createdBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)

    @property
    def is_live(self):
        """Condition's 'is_live' value."""
        return self.get("isLive")

    @property
    def matches_prefix(self):
        """Condition's 'matches_prefix' value."""
        return self.get("matchesPrefix")

    @property
    def matches_storage_class(self):
        """Condition's 'matches_storage_class' value."""
        return self.get("matchesStorageClass")

    @property
    def matches_suffix(self):
        """Condition's 'matches_suffix' value."""
        return self.get("matchesSuffix")

    @property
    def number_of_newer_versions(self):
        """Condition's 'number_of_newer_versions' value."""
        return self.get("numNewerVersions")

    @property
    def days_since_custom_time(self):
        """Condition's 'days_since_custom_time' value."""
        return self.get("daysSinceCustomTime")

    @property
    def custom_time_before(self):
        """Condition's 'custom_time_before' value."""
        before = self.get("customTimeBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)

    @property
    def days_since_noncurrent_time(self):
        """Condition's 'days_since_noncurrent_time' value."""
        return self.get("daysSinceNoncurrentTime")

    @property
    def noncurrent_time_before(self):
        """Condition's 'noncurrent_time_before' value."""
        before = self.get("noncurrentTimeBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)

340 

341 

class LifecycleRuleDelete(dict):
    """Map a lifecycle rule deleting matching items.

    :type kw: dict
    :params kw: arguments passed to :class:`LifecycleRuleConditions`.
    """

    def __init__(self, **kw):
        super().__init__(
            {
                "action": {"type": "Delete"},
                "condition": dict(LifecycleRuleConditions(**kw)),
            }
        )

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleDelete`
        :returns: Instance created from resource.
        """
        rule = cls(_factory=True)
        rule.update(resource)
        return rule

367 

368 

class LifecycleRuleSetStorageClass(dict):
    """Map a lifecycle rule updating storage class of matching items.

    :type storage_class: str, one of :attr:`Bucket.STORAGE_CLASSES`.
    :param storage_class: new storage class to assign to matching items.

    :type kw: dict
    :params kw: arguments passed to :class:`LifecycleRuleConditions`.
    """

    def __init__(self, storage_class, **kw):
        action = {"type": "SetStorageClass", "storageClass": storage_class}
        super().__init__(
            {
                "action": action,
                "condition": dict(LifecycleRuleConditions(**kw)),
            }
        )

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleSetStorageClass`
        :returns: Instance created from resource.
        """
        # The target storage class is required by the constructor, so pull
        # it out of the resource's action before building the instance.
        rule = cls(resource["action"]["storageClass"], _factory=True)
        rule.update(resource)
        return rule

401 

402 

class LifecycleRuleAbortIncompleteMultipartUpload(dict):
    """Map a rule aborting incomplete multipart uploads of matching items.

    The "age" lifecycle condition is the only supported condition for this rule.

    :type kw: dict
    :params kw: arguments passed to :class:`LifecycleRuleConditions`.
    """

    def __init__(self, **kw):
        super().__init__(
            {
                "action": {"type": "AbortIncompleteMultipartUpload"},
                "condition": dict(LifecycleRuleConditions(**kw)),
            }
        )

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleAbortIncompleteMultipartUpload`
        :returns: Instance created from resource.
        """
        rule = cls(_factory=True)
        rule.update(resource)
        return rule

433 

434 

# Sentinel distinguishing "keyword omitted" from an explicit ``None``
# (used by IAMConfiguration's deprecated-alias handling below).
_default = object()

436 

437 

class IAMConfiguration(dict):
    """Map a bucket's IAM configuration.

    :type bucket: :class:`Bucket`
    :params bucket: Bucket for which this instance is the policy.

    :type public_access_prevention: str
    :params public_access_prevention:
        (Optional) Whether the public access prevention policy is 'inherited' (default) or 'enforced'
        See: https://cloud.google.com/storage/docs/public-access-prevention

    :type uniform_bucket_level_access_enabled: bool
    :params uniform_bucket_level_access_enabled:
        (Optional) Whether the IAM-only policy is enabled for the bucket.

    :type uniform_bucket_level_access_locked_time: :class:`datetime.datetime`
    :params uniform_bucket_level_access_locked_time:
        (Optional) When the bucket's IAM-only policy was enabled.
        This value should normally only be set by the back-end API.

    :type bucket_policy_only_enabled: bool
    :params bucket_policy_only_enabled:
        Deprecated alias for :data:`uniform_bucket_level_access_enabled`.

    :type bucket_policy_only_locked_time: :class:`datetime.datetime`
    :params bucket_policy_only_locked_time:
        Deprecated alias for :data:`uniform_bucket_level_access_locked_time`.
    """

    def __init__(
        self,
        bucket,
        public_access_prevention=_default,
        uniform_bucket_level_access_enabled=_default,
        uniform_bucket_level_access_locked_time=_default,
        bucket_policy_only_enabled=_default,
        bucket_policy_only_locked_time=_default,
    ):
        # Fold the deprecated "bucket policy only" spellings into the current
        # "uniform bucket level access" ones, rejecting conflicting usage.
        if bucket_policy_only_enabled is not _default:
            if uniform_bucket_level_access_enabled is not _default:
                raise ValueError(_UBLA_BPO_ENABLED_MESSAGE)
            warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2)
            uniform_bucket_level_access_enabled = bucket_policy_only_enabled

        if bucket_policy_only_locked_time is not _default:
            if uniform_bucket_level_access_locked_time is not _default:
                raise ValueError(_UBLA_BPO_LOCK_TIME_MESSAGE)
            warnings.warn(_BPO_LOCK_TIME_MESSAGE, DeprecationWarning, stacklevel=2)
            uniform_bucket_level_access_locked_time = bucket_policy_only_locked_time

        # Apply defaults for anything still unset.
        if uniform_bucket_level_access_enabled is _default:
            uniform_bucket_level_access_enabled = False
        if public_access_prevention is _default:
            public_access_prevention = PUBLIC_ACCESS_PREVENTION_INHERITED

        ubla = {"enabled": uniform_bucket_level_access_enabled}
        if uniform_bucket_level_access_locked_time is not _default:
            ubla["lockedTime"] = _datetime_to_rfc3339(
                uniform_bucket_level_access_locked_time
            )
        super().__init__(
            {
                "uniformBucketLevelAccess": ubla,
                "publicAccessPrevention": public_access_prevention,
            }
        )
        self._bucket = bucket

    @classmethod
    def from_api_repr(cls, resource, bucket):
        """Factory: construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :type bucket: :class:`Bucket`
        :params bucket: Bucket for which this instance is the policy.

        :rtype: :class:`IAMConfiguration`
        :returns: Instance created from resource.
        """
        config = cls(bucket)
        config.update(resource)
        return config

    @property
    def bucket(self):
        """Bucket for which this instance is the policy.

        :rtype: :class:`Bucket`
        :returns: the instance's bucket.
        """
        return self._bucket

    @property
    def public_access_prevention(self):
        """Setting for public access prevention policy. Options are 'inherited' (default) or 'enforced'.

        See: https://cloud.google.com/storage/docs/public-access-prevention

        :rtype: string
        :returns: the public access prevention status, either 'enforced' or 'inherited'.
        """
        return self["publicAccessPrevention"]

    @public_access_prevention.setter
    def public_access_prevention(self, value):
        self["publicAccessPrevention"] = value
        # Flag the whole iamConfiguration sub-resource as changed on the bucket.
        self.bucket._patch_property("iamConfiguration", self)

    @property
    def uniform_bucket_level_access_enabled(self):
        """If set, access checks only use bucket-level IAM policies or above.

        :rtype: bool
        :returns: whether the bucket is configured to allow only IAM.
        """
        return self.get("uniformBucketLevelAccess", {}).get("enabled", False)

    @uniform_bucket_level_access_enabled.setter
    def uniform_bucket_level_access_enabled(self, value):
        self.setdefault("uniformBucketLevelAccess", {})["enabled"] = bool(value)
        self.bucket._patch_property("iamConfiguration", self)

    @property
    def uniform_bucket_level_access_locked_time(self):
        """Deadline for changing :attr:`uniform_bucket_level_access_enabled` from true to false.

        If the bucket's :attr:`uniform_bucket_level_access_enabled` is true, this property
        is the time after which that setting becomes immutable.

        If the bucket's :attr:`uniform_bucket_level_access_enabled` is false, this property
        is ``None``.

        :rtype: Union[:class:`datetime.datetime`, None]
        :returns: (readonly) Time after which :attr:`uniform_bucket_level_access_enabled` will
                  be frozen as true.
        """
        stamp = self.get("uniformBucketLevelAccess", {}).get("lockedTime")
        if stamp is None:
            return None
        return _rfc3339_nanos_to_datetime(stamp)

    @property
    def bucket_policy_only_enabled(self):
        """Deprecated alias for :attr:`uniform_bucket_level_access_enabled`.

        :rtype: bool
        :returns: whether the bucket is configured to allow only IAM.
        """
        return self.uniform_bucket_level_access_enabled

    @bucket_policy_only_enabled.setter
    def bucket_policy_only_enabled(self, value):
        warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2)
        self.uniform_bucket_level_access_enabled = value

    @property
    def bucket_policy_only_locked_time(self):
        """Deprecated alias for :attr:`uniform_bucket_level_access_locked_time`.

        :rtype: Union[:class:`datetime.datetime`, None]
        :returns:
            (readonly) Time after which :attr:`bucket_policy_only_enabled` will
            be frozen as true.
        """
        return self.uniform_bucket_level_access_locked_time

613 

614 

class Bucket(_PropertyMixin):
    """A class representing a Bucket on Cloud Storage.

    :type client: :class:`google.cloud.storage.client.Client`
    :param client: A client which holds credentials and project configuration
                   for the bucket (which requires a project).

    :type name: str
    :param name: The name of the bucket. Bucket names must start and end with a
                 number or letter.

    :type user_project: str
    :param user_project: (Optional) the project ID to be billed for API
                         requests made via this instance.
    """

    _MAX_OBJECTS_FOR_ITERATION = 256
    """Maximum number of existing objects allowed in iteration.

    This is used in Bucket.delete() and Bucket.make_public().
    """

    STORAGE_CLASSES = (
        STANDARD_STORAGE_CLASS,
        NEARLINE_STORAGE_CLASS,
        COLDLINE_STORAGE_CLASS,
        ARCHIVE_STORAGE_CLASS,
        MULTI_REGIONAL_LEGACY_STORAGE_CLASS,  # legacy
        REGIONAL_LEGACY_STORAGE_CLASS,  # legacy
        DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS,  # legacy
    )
    """Allowed values for :attr:`storage_class`.

    Default value is :attr:`STANDARD_STORAGE_CLASS`.

    See
    https://cloud.google.com/storage/docs/json_api/v1/buckets#storageClass
    https://cloud.google.com/storage/docs/storage-classes
    """

    _LOCATION_TYPES = (
        MULTI_REGION_LOCATION_TYPE,
        REGION_LOCATION_TYPE,
        DUAL_REGION_LOCATION_TYPE,
    )
    """Allowed values for :attr:`location_type`."""

661 

662 def __init__(self, client, name=None, user_project=None): 

663 """ 

664 property :attr:`name` 

665 Get the bucket's name. 

666 """ 

667 name = _validate_name(name) 

668 super(Bucket, self).__init__(name=name) 

669 self._client = client 

670 self._acl = BucketACL(self) 

671 self._default_object_acl = DefaultObjectACL(self) 

672 self._label_removals = set() 

673 self._user_project = user_project 

674 

675 def __repr__(self): 

676 return f"<Bucket: {self.name}>" 

677 

    @property
    def client(self):
        """The client bound to this bucket.

        :rtype: :class:`google.cloud.storage.client.Client`
        :returns: the client this bucket was constructed with.
        """
        return self._client

682 

    def _set_properties(self, value):
        """Set the properties for the current object.

        Clears any pending label removals first, since the incoming resource
        supersedes locally staged label edits.

        :type value: dict or :class:`google.cloud.storage.batch._FutureDict`
        :param value: The properties to be set.
        """
        self._label_removals.clear()
        return super(Bucket, self)._set_properties(value)

691 

    @property
    def rpo(self):
        """Get the RPO (Recovery Point Objective) of this bucket.

        See: https://cloud.google.com/storage/docs/managing-turbo-replication

        "ASYNC_TURBO" or "DEFAULT"
        :rtype: str
        """
        return self._properties.get("rpo")

    @rpo.setter
    def rpo(self, value):
        """
        Set the RPO (Recovery Point Objective) of this bucket.

        See: https://cloud.google.com/storage/docs/managing-turbo-replication

        :type value: str
        :param value: "ASYNC_TURBO" or "DEFAULT"
        """
        # NOTE(review): _patch_property presumably stages the change for the
        # next API PATCH rather than sending it immediately — confirm in
        # _helpers._PropertyMixin.
        self._patch_property("rpo", value)

714 

    @property
    def user_project(self):
        """Project ID to be billed for API requests made via this bucket.

        If unset, API requests are billed to the bucket owner.

        A user project is required for all operations on Requester Pays buckets.

        See https://cloud.google.com/storage/docs/requester-pays#requirements for details.

        :rtype: str
        :returns: the project ID set at construction time, or ``None``.
        """
        return self._user_project

728 

729 @classmethod 

730 def from_string(cls, uri, client=None): 

731 """Get a constructor for bucket object by URI. 

732 

733 .. code-block:: python 

734 

735 from google.cloud import storage 

736 from google.cloud.storage.bucket import Bucket 

737 client = storage.Client() 

738 bucket = Bucket.from_string("gs://bucket", client=client) 

739 

740 :type uri: str 

741 :param uri: The bucket uri pass to get bucket object. 

742 

743 :type client: :class:`~google.cloud.storage.client.Client` or 

744 ``NoneType`` 

745 :param client: (Optional) The client to use. Application code should 

746 *always* pass ``client``. 

747 

748 :rtype: :class:`google.cloud.storage.bucket.Bucket` 

749 :returns: The bucket object created. 

750 """ 

751 scheme, netloc, path, query, frag = urlsplit(uri) 

752 

753 if scheme != "gs": 

754 raise ValueError("URI scheme must be gs") 

755 

756 return cls(client, name=netloc) 

757 

    def blob(
        self,
        blob_name,
        chunk_size=None,
        encryption_key=None,
        kms_key_name=None,
        generation=None,
    ):
        """Factory constructor for blob object.

        .. note::
          This will not make an HTTP request; it simply instantiates
          a blob object owned by this bucket.

        :type blob_name: str
        :param blob_name: The name of the blob to be instantiated.

        :type chunk_size: int
        :param chunk_size: The size of a chunk of data whenever iterating
                           (in bytes). This must be a multiple of 256 KB per
                           the API specification.

        :type encryption_key: bytes
        :param encryption_key:
            (Optional) 32 byte encryption key for customer-supplied encryption.

        :type kms_key_name: str
        :param kms_key_name:
            (Optional) Resource name of KMS key used to encrypt blob's content.

        :type generation: long
        :param generation: (Optional) If present, selects a specific revision of
                           this object.

        :rtype: :class:`google.cloud.storage.blob.Blob`
        :returns: The blob object created.
        """
        # Pure passthrough: every argument forwards unchanged to Blob.
        return Blob(
            name=blob_name,
            bucket=self,
            chunk_size=chunk_size,
            encryption_key=encryption_key,
            kms_key_name=kms_key_name,
            generation=generation,
        )

803 

    def notification(
        self,
        topic_name=None,
        topic_project=None,
        custom_attributes=None,
        event_types=None,
        blob_name_prefix=None,
        payload_format=NONE_PAYLOAD_FORMAT,
        notification_id=None,
    ):
        """Factory: create a notification resource for the bucket.

        No API request is made; all arguments forward unchanged to
        :class:`.BucketNotification`.

        See: :class:`.BucketNotification` for parameters.

        :rtype: :class:`.BucketNotification`
        """
        return BucketNotification(
            self,
            topic_name=topic_name,
            topic_project=topic_project,
            custom_attributes=custom_attributes,
            event_types=event_types,
            blob_name_prefix=blob_name_prefix,
            payload_format=payload_format,
            notification_id=notification_id,
        )

830 

831 def exists( 

832 self, 

833 client=None, 

834 timeout=_DEFAULT_TIMEOUT, 

835 if_etag_match=None, 

836 if_etag_not_match=None, 

837 if_metageneration_match=None, 

838 if_metageneration_not_match=None, 

839 retry=DEFAULT_RETRY, 

840 ): 

841 """Determines whether or not this bucket exists. 

842 

843 If :attr:`user_project` is set, bills the API request to that project. 

844 

845 :type client: :class:`~google.cloud.storage.client.Client` or 

846 ``NoneType`` 

847 :param client: (Optional) The client to use. If not passed, falls back 

848 to the ``client`` stored on the current bucket. 

849 

850 :type timeout: float or tuple 

851 :param timeout: 

852 (Optional) The amount of time, in seconds, to wait 

853 for the server response. See: :ref:`configuring_timeouts` 

854 

855 :type if_etag_match: Union[str, Set[str]] 

856 :param if_etag_match: (Optional) Make the operation conditional on whether the 

857 bucket's current ETag matches the given value. 

858 

859 :type if_etag_not_match: Union[str, Set[str]]) 

860 :param if_etag_not_match: (Optional) Make the operation conditional on whether the 

861 bucket's current ETag does not match the given value. 

862 

863 :type if_metageneration_match: long 

864 :param if_metageneration_match: (Optional) Make the operation conditional on whether the 

865 bucket's current metageneration matches the given value. 

866 

867 :type if_metageneration_not_match: long 

868 :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the 

869 bucket's current metageneration does not match the given value. 

870 

871 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

872 :param retry: 

873 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

874 

875 :rtype: bool 

876 :returns: True if the bucket exists in Cloud Storage. 

877 """ 

878 client = self._require_client(client) 

879 # We only need the status code (200 or not) so we seek to 

880 # minimize the returned payload. 

881 query_params = {"fields": "name"} 

882 

883 if self.user_project is not None: 

884 query_params["userProject"] = self.user_project 

885 

886 _add_generation_match_parameters( 

887 query_params, 

888 if_metageneration_match=if_metageneration_match, 

889 if_metageneration_not_match=if_metageneration_not_match, 

890 ) 

891 

892 headers = {} 

893 _add_etag_match_headers( 

894 headers, if_etag_match=if_etag_match, if_etag_not_match=if_etag_not_match 

895 ) 

896 

897 try: 

898 # We intentionally pass `_target_object=None` since fields=name 

899 # would limit the local properties. 

900 client._get_resource( 

901 self.path, 

902 query_params=query_params, 

903 headers=headers, 

904 timeout=timeout, 

905 retry=retry, 

906 _target_object=None, 

907 ) 

908 except NotFound: 

909 # NOTE: This will not fail immediately in a batch. However, when 

910 # Batch.finish() is called, the resulting `NotFound` will be 

911 # raised. 

912 return False 

913 return True 

914 

915 def create( 

916 self, 

917 client=None, 

918 project=None, 

919 location=None, 

920 predefined_acl=None, 

921 predefined_default_object_acl=None, 

922 timeout=_DEFAULT_TIMEOUT, 

923 retry=DEFAULT_RETRY, 

924 ): 

925 """Creates current bucket. 

926 

927 If the bucket already exists, will raise 

928 :class:`google.cloud.exceptions.Conflict`. 

929 

930 This implements "storage.buckets.insert". 

931 

932 If :attr:`user_project` is set, bills the API request to that project. 

933 

934 :type client: :class:`~google.cloud.storage.client.Client` or 

935 ``NoneType`` 

936 :param client: (Optional) The client to use. If not passed, falls back 

937 to the ``client`` stored on the current bucket. 

938 

939 :type project: str 

940 :param project: (Optional) The project under which the bucket is to 

941 be created. If not passed, uses the project set on 

942 the client. 

943 :raises ValueError: if ``project`` is None and client's 

944 :attr:`project` is also None. 

945 

946 :type location: str 

947 :param location: (Optional) The location of the bucket. If not passed, 

948 the default location, US, will be used. See 

949 https://cloud.google.com/storage/docs/bucket-locations 

950 

951 :type predefined_acl: str 

952 :param predefined_acl: 

953 (Optional) Name of predefined ACL to apply to bucket. See: 

954 https://cloud.google.com/storage/docs/access-control/lists#predefined-acl 

955 

956 :type predefined_default_object_acl: str 

957 :param predefined_default_object_acl: 

958 (Optional) Name of predefined ACL to apply to bucket's objects. See: 

959 https://cloud.google.com/storage/docs/access-control/lists#predefined-acl 

960 

961 :type timeout: float or tuple 

962 :param timeout: 

963 (Optional) The amount of time, in seconds, to wait 

964 for the server response. See: :ref:`configuring_timeouts` 

965 

966 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

967 :param retry: 

968 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

969 """ 

970 

971 client = self._require_client(client) 

972 client.create_bucket( 

973 bucket_or_name=self, 

974 project=project, 

975 user_project=self.user_project, 

976 location=location, 

977 predefined_acl=predefined_acl, 

978 predefined_default_object_acl=predefined_default_object_acl, 

979 timeout=timeout, 

980 retry=retry, 

981 ) 

982 

983 def update( 

984 self, 

985 client=None, 

986 timeout=_DEFAULT_TIMEOUT, 

987 if_metageneration_match=None, 

988 if_metageneration_not_match=None, 

989 retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED, 

990 ): 

991 """Sends all properties in a PUT request. 

992 

993 Updates the ``_properties`` with the response from the backend. 

994 

995 If :attr:`user_project` is set, bills the API request to that project. 

996 

997 :type client: :class:`~google.cloud.storage.client.Client` or 

998 ``NoneType`` 

999 :param client: the client to use. If not passed, falls back to the 

1000 ``client`` stored on the current object. 

1001 

1002 :type timeout: float or tuple 

1003 :param timeout: 

1004 (Optional) The amount of time, in seconds, to wait 

1005 for the server response. See: :ref:`configuring_timeouts` 

1006 

1007 :type if_metageneration_match: long 

1008 :param if_metageneration_match: (Optional) Make the operation conditional on whether the 

1009 blob's current metageneration matches the given value. 

1010 

1011 :type if_metageneration_not_match: long 

1012 :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the 

1013 blob's current metageneration does not match the given value. 

1014 

1015 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1016 :param retry: 

1017 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

1018 """ 

1019 super(Bucket, self).update( 

1020 client=client, 

1021 timeout=timeout, 

1022 if_metageneration_match=if_metageneration_match, 

1023 if_metageneration_not_match=if_metageneration_not_match, 

1024 retry=retry, 

1025 ) 

1026 

1027 def reload( 

1028 self, 

1029 client=None, 

1030 projection="noAcl", 

1031 timeout=_DEFAULT_TIMEOUT, 

1032 if_etag_match=None, 

1033 if_etag_not_match=None, 

1034 if_metageneration_match=None, 

1035 if_metageneration_not_match=None, 

1036 retry=DEFAULT_RETRY, 

1037 ): 

1038 """Reload properties from Cloud Storage. 

1039 

1040 If :attr:`user_project` is set, bills the API request to that project. 

1041 

1042 :type client: :class:`~google.cloud.storage.client.Client` or 

1043 ``NoneType`` 

1044 :param client: the client to use. If not passed, falls back to the 

1045 ``client`` stored on the current object. 

1046 

1047 :type projection: str 

1048 :param projection: (Optional) If used, must be 'full' or 'noAcl'. 

1049 Defaults to ``'noAcl'``. Specifies the set of 

1050 properties to return. 

1051 

1052 :type timeout: float or tuple 

1053 :param timeout: 

1054 (Optional) The amount of time, in seconds, to wait 

1055 for the server response. See: :ref:`configuring_timeouts` 

1056 

1057 :type if_etag_match: Union[str, Set[str]] 

1058 :param if_etag_match: (Optional) Make the operation conditional on whether the 

1059 bucket's current ETag matches the given value. 

1060 

1061 :type if_etag_not_match: Union[str, Set[str]]) 

1062 :param if_etag_not_match: (Optional) Make the operation conditional on whether the 

1063 bucket's current ETag does not match the given value. 

1064 

1065 :type if_metageneration_match: long 

1066 :param if_metageneration_match: (Optional) Make the operation conditional on whether the 

1067 bucket's current metageneration matches the given value. 

1068 

1069 :type if_metageneration_not_match: long 

1070 :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the 

1071 bucket's current metageneration does not match the given value. 

1072 

1073 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1074 :param retry: 

1075 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

1076 """ 

1077 super(Bucket, self).reload( 

1078 client=client, 

1079 projection=projection, 

1080 timeout=timeout, 

1081 if_etag_match=if_etag_match, 

1082 if_etag_not_match=if_etag_not_match, 

1083 if_metageneration_match=if_metageneration_match, 

1084 if_metageneration_not_match=if_metageneration_not_match, 

1085 retry=retry, 

1086 ) 

1087 

1088 def patch( 

1089 self, 

1090 client=None, 

1091 timeout=_DEFAULT_TIMEOUT, 

1092 if_metageneration_match=None, 

1093 if_metageneration_not_match=None, 

1094 retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED, 

1095 ): 

1096 """Sends all changed properties in a PATCH request. 

1097 

1098 Updates the ``_properties`` with the response from the backend. 

1099 

1100 If :attr:`user_project` is set, bills the API request to that project. 

1101 

1102 :type client: :class:`~google.cloud.storage.client.Client` or 

1103 ``NoneType`` 

1104 :param client: the client to use. If not passed, falls back to the 

1105 ``client`` stored on the current object. 

1106 

1107 :type timeout: float or tuple 

1108 :param timeout: 

1109 (Optional) The amount of time, in seconds, to wait 

1110 for the server response. See: :ref:`configuring_timeouts` 

1111 

1112 :type if_metageneration_match: long 

1113 :param if_metageneration_match: (Optional) Make the operation conditional on whether the 

1114 blob's current metageneration matches the given value. 

1115 

1116 :type if_metageneration_not_match: long 

1117 :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the 

1118 blob's current metageneration does not match the given value. 

1119 

1120 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1121 :param retry: 

1122 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

1123 """ 

1124 # Special case: For buckets, it is possible that labels are being 

1125 # removed; this requires special handling. 

1126 if self._label_removals: 

1127 self._changes.add("labels") 

1128 self._properties.setdefault("labels", {}) 

1129 for removed_label in self._label_removals: 

1130 self._properties["labels"][removed_label] = None 

1131 

1132 # Call the superclass method. 

1133 super(Bucket, self).patch( 

1134 client=client, 

1135 if_metageneration_match=if_metageneration_match, 

1136 if_metageneration_not_match=if_metageneration_not_match, 

1137 timeout=timeout, 

1138 retry=retry, 

1139 ) 

1140 

    @property
    def acl(self):
        """The bucket's ACL helper, as stored on the instance (``self._acl``)."""
        return self._acl

1145 

    @property
    def default_object_acl(self):
        """The bucket's default-object ACL helper, as stored on the instance
        (``self._default_object_acl``).
        """
        return self._default_object_acl

1150 

1151 @staticmethod 

1152 def path_helper(bucket_name): 

1153 """Relative URL path for a bucket. 

1154 

1155 :type bucket_name: str 

1156 :param bucket_name: The bucket name in the path. 

1157 

1158 :rtype: str 

1159 :returns: The relative URL path for ``bucket_name``. 

1160 """ 

1161 return "/b/" + bucket_name 

1162 

    @property
    def path(self):
        """The relative URL path to this bucket (``/b/<name>``).

        :raises ValueError: if the bucket has no name set.
        """
        if not self.name:
            raise ValueError("Cannot determine path without bucket name.")

        return self.path_helper(self.name)

1170 

1171 def get_blob( 

1172 self, 

1173 blob_name, 

1174 client=None, 

1175 encryption_key=None, 

1176 generation=None, 

1177 if_etag_match=None, 

1178 if_etag_not_match=None, 

1179 if_generation_match=None, 

1180 if_generation_not_match=None, 

1181 if_metageneration_match=None, 

1182 if_metageneration_not_match=None, 

1183 timeout=_DEFAULT_TIMEOUT, 

1184 retry=DEFAULT_RETRY, 

1185 **kwargs, 

1186 ): 

1187 """Get a blob object by name. 

1188 

1189 See a [code sample](https://cloud.google.com/storage/docs/samples/storage-get-metadata#storage_get_metadata-python) 

1190 on how to retrieve metadata of an object. 

1191 

1192 If :attr:`user_project` is set, bills the API request to that project. 

1193 

1194 :type blob_name: str 

1195 :param blob_name: The name of the blob to retrieve. 

1196 

1197 :type client: :class:`~google.cloud.storage.client.Client` or 

1198 ``NoneType`` 

1199 :param client: (Optional) The client to use. If not passed, falls back 

1200 to the ``client`` stored on the current bucket. 

1201 

1202 :type encryption_key: bytes 

1203 :param encryption_key: 

1204 (Optional) 32 byte encryption key for customer-supplied encryption. 

1205 See 

1206 https://cloud.google.com/storage/docs/encryption#customer-supplied. 

1207 

1208 :type generation: long 

1209 :param generation: 

1210 (Optional) If present, selects a specific revision of this object. 

1211 

1212 :type if_etag_match: Union[str, Set[str]] 

1213 :param if_etag_match: 

1214 (Optional) See :ref:`using-if-etag-match` 

1215 

1216 :type if_etag_not_match: Union[str, Set[str]] 

1217 :param if_etag_not_match: 

1218 (Optional) See :ref:`using-if-etag-not-match` 

1219 

1220 :type if_generation_match: long 

1221 :param if_generation_match: 

1222 (Optional) See :ref:`using-if-generation-match` 

1223 

1224 :type if_generation_not_match: long 

1225 :param if_generation_not_match: 

1226 (Optional) See :ref:`using-if-generation-not-match` 

1227 

1228 :type if_metageneration_match: long 

1229 :param if_metageneration_match: 

1230 (Optional) See :ref:`using-if-metageneration-match` 

1231 

1232 :type if_metageneration_not_match: long 

1233 :param if_metageneration_not_match: 

1234 (Optional) See :ref:`using-if-metageneration-not-match` 

1235 

1236 :type timeout: float or tuple 

1237 :param timeout: 

1238 (Optional) The amount of time, in seconds, to wait 

1239 for the server response. See: :ref:`configuring_timeouts` 

1240 

1241 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1242 :param retry: 

1243 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

1244 

1245 :param kwargs: Keyword arguments to pass to the 

1246 :class:`~google.cloud.storage.blob.Blob` constructor. 

1247 

1248 :rtype: :class:`google.cloud.storage.blob.Blob` or None 

1249 :returns: The blob object if it exists, otherwise None. 

1250 """ 

1251 blob = Blob( 

1252 bucket=self, 

1253 name=blob_name, 

1254 encryption_key=encryption_key, 

1255 generation=generation, 

1256 **kwargs, 

1257 ) 

1258 try: 

1259 # NOTE: This will not fail immediately in a batch. However, when 

1260 # Batch.finish() is called, the resulting `NotFound` will be 

1261 # raised. 

1262 blob.reload( 

1263 client=client, 

1264 timeout=timeout, 

1265 if_etag_match=if_etag_match, 

1266 if_etag_not_match=if_etag_not_match, 

1267 if_generation_match=if_generation_match, 

1268 if_generation_not_match=if_generation_not_match, 

1269 if_metageneration_match=if_metageneration_match, 

1270 if_metageneration_not_match=if_metageneration_not_match, 

1271 retry=retry, 

1272 ) 

1273 except NotFound: 

1274 return None 

1275 else: 

1276 return blob 

1277 

1278 def list_blobs( 

1279 self, 

1280 max_results=None, 

1281 page_token=None, 

1282 prefix=None, 

1283 delimiter=None, 

1284 start_offset=None, 

1285 end_offset=None, 

1286 include_trailing_delimiter=None, 

1287 versions=None, 

1288 projection="noAcl", 

1289 fields=None, 

1290 client=None, 

1291 timeout=_DEFAULT_TIMEOUT, 

1292 retry=DEFAULT_RETRY, 

1293 ): 

1294 """Return an iterator used to find blobs in the bucket. 

1295 

1296 If :attr:`user_project` is set, bills the API request to that project. 

1297 

1298 :type max_results: int 

1299 :param max_results: 

1300 (Optional) The maximum number of blobs to return. 

1301 

1302 :type page_token: str 

1303 :param page_token: 

1304 (Optional) If present, return the next batch of blobs, using the 

1305 value, which must correspond to the ``nextPageToken`` value 

1306 returned in the previous response. Deprecated: use the ``pages`` 

1307 property of the returned iterator instead of manually passing the 

1308 token. 

1309 

1310 :type prefix: str 

1311 :param prefix: (Optional) Prefix used to filter blobs. 

1312 

1313 :type delimiter: str 

1314 :param delimiter: (Optional) Delimiter, used with ``prefix`` to 

1315 emulate hierarchy. 

1316 

1317 :type start_offset: str 

1318 :param start_offset: 

1319 (Optional) Filter results to objects whose names are 

1320 lexicographically equal to or after ``startOffset``. If 

1321 ``endOffset`` is also set, the objects listed will have names 

1322 between ``startOffset`` (inclusive) and ``endOffset`` (exclusive). 

1323 

1324 :type end_offset: str 

1325 :param end_offset: 

1326 (Optional) Filter results to objects whose names are 

1327 lexicographically before ``endOffset``. If ``startOffset`` is also 

1328 set, the objects listed will have names between ``startOffset`` 

1329 (inclusive) and ``endOffset`` (exclusive). 

1330 

1331 :type include_trailing_delimiter: boolean 

1332 :param include_trailing_delimiter: 

1333 (Optional) If true, objects that end in exactly one instance of 

1334 ``delimiter`` will have their metadata included in ``items`` in 

1335 addition to ``prefixes``. 

1336 

1337 :type versions: bool 

1338 :param versions: (Optional) Whether object versions should be returned 

1339 as separate blobs. 

1340 

1341 :type projection: str 

1342 :param projection: (Optional) If used, must be 'full' or 'noAcl'. 

1343 Defaults to ``'noAcl'``. Specifies the set of 

1344 properties to return. 

1345 

1346 :type fields: str 

1347 :param fields: 

1348 (Optional) Selector specifying which fields to include 

1349 in a partial response. Must be a list of fields. For 

1350 example to get a partial response with just the next 

1351 page token and the name and language of each blob returned: 

1352 ``'items(name,contentLanguage),nextPageToken'``. 

1353 See: https://cloud.google.com/storage/docs/json_api/v1/parameters#fields 

1354 

1355 :type client: :class:`~google.cloud.storage.client.Client` 

1356 :param client: (Optional) The client to use. If not passed, falls back 

1357 to the ``client`` stored on the current bucket. 

1358 

1359 :type timeout: float or tuple 

1360 :param timeout: 

1361 (Optional) The amount of time, in seconds, to wait 

1362 for the server response. See: :ref:`configuring_timeouts` 

1363 

1364 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1365 :param retry: 

1366 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

1367 

1368 :rtype: :class:`~google.api_core.page_iterator.Iterator` 

1369 :returns: Iterator of all :class:`~google.cloud.storage.blob.Blob` 

1370 in this bucket matching the arguments. 

1371 """ 

1372 client = self._require_client(client) 

1373 return client.list_blobs( 

1374 self, 

1375 max_results=max_results, 

1376 page_token=page_token, 

1377 prefix=prefix, 

1378 delimiter=delimiter, 

1379 start_offset=start_offset, 

1380 end_offset=end_offset, 

1381 include_trailing_delimiter=include_trailing_delimiter, 

1382 versions=versions, 

1383 projection=projection, 

1384 fields=fields, 

1385 timeout=timeout, 

1386 retry=retry, 

1387 ) 

1388 

1389 def list_notifications( 

1390 self, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY 

1391 ): 

1392 """List Pub / Sub notifications for this bucket. 

1393 

1394 See: 

1395 https://cloud.google.com/storage/docs/json_api/v1/notifications/list 

1396 

1397 If :attr:`user_project` is set, bills the API request to that project. 

1398 

1399 :type client: :class:`~google.cloud.storage.client.Client` or 

1400 ``NoneType`` 

1401 :param client: (Optional) The client to use. If not passed, falls back 

1402 to the ``client`` stored on the current bucket. 

1403 :type timeout: float or tuple 

1404 :param timeout: 

1405 (Optional) The amount of time, in seconds, to wait 

1406 for the server response. See: :ref:`configuring_timeouts` 

1407 

1408 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1409 :param retry: 

1410 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

1411 

1412 :rtype: list of :class:`.BucketNotification` 

1413 :returns: notification instances 

1414 """ 

1415 client = self._require_client(client) 

1416 path = self.path + "/notificationConfigs" 

1417 iterator = client._list_resource( 

1418 path, 

1419 _item_to_notification, 

1420 timeout=timeout, 

1421 retry=retry, 

1422 ) 

1423 iterator.bucket = self 

1424 return iterator 

1425 

1426 def get_notification( 

1427 self, 

1428 notification_id, 

1429 client=None, 

1430 timeout=_DEFAULT_TIMEOUT, 

1431 retry=DEFAULT_RETRY, 

1432 ): 

1433 """Get Pub / Sub notification for this bucket. 

1434 

1435 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/notifications/get) 

1436 and a [code sample](https://cloud.google.com/storage/docs/samples/storage-print-pubsub-bucket-notification#storage_print_pubsub_bucket_notification-python). 

1437 

1438 If :attr:`user_project` is set, bills the API request to that project. 

1439 

1440 :type notification_id: str 

1441 :param notification_id: The notification id to retrieve the notification configuration. 

1442 

1443 :type client: :class:`~google.cloud.storage.client.Client` or 

1444 ``NoneType`` 

1445 :param client: (Optional) The client to use. If not passed, falls back 

1446 to the ``client`` stored on the current bucket. 

1447 :type timeout: float or tuple 

1448 :param timeout: 

1449 (Optional) The amount of time, in seconds, to wait 

1450 for the server response. See: :ref:`configuring_timeouts` 

1451 

1452 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1453 :param retry: 

1454 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

1455 

1456 :rtype: :class:`.BucketNotification` 

1457 :returns: notification instance. 

1458 """ 

1459 notification = self.notification(notification_id=notification_id) 

1460 notification.reload(client=client, timeout=timeout, retry=retry) 

1461 return notification 

1462 

1463 def delete( 

1464 self, 

1465 force=False, 

1466 client=None, 

1467 if_metageneration_match=None, 

1468 if_metageneration_not_match=None, 

1469 timeout=_DEFAULT_TIMEOUT, 

1470 retry=DEFAULT_RETRY, 

1471 ): 

1472 """Delete this bucket. 

1473 

1474 The bucket **must** be empty in order to submit a delete request. If 

1475 ``force=True`` is passed, this will first attempt to delete all the 

1476 objects / blobs in the bucket (i.e. try to empty the bucket). 

1477 

1478 If the bucket doesn't exist, this will raise 

1479 :class:`google.cloud.exceptions.NotFound`. If the bucket is not empty 

1480 (and ``force=False``), will raise :class:`google.cloud.exceptions.Conflict`. 

1481 

1482 If ``force=True`` and the bucket contains more than 256 objects / blobs 

1483 this will cowardly refuse to delete the objects (or the bucket). This 

1484 is to prevent accidental bucket deletion and to prevent extremely long 

1485 runtime of this method. Also note that ``force=True`` is not supported 

1486 in a ``Batch`` context. 

1487 

1488 If :attr:`user_project` is set, bills the API request to that project. 

1489 

1490 :type force: bool 

1491 :param force: If True, empties the bucket's objects then deletes it. 

1492 

1493 :type client: :class:`~google.cloud.storage.client.Client` or 

1494 ``NoneType`` 

1495 :param client: (Optional) The client to use. If not passed, falls back 

1496 to the ``client`` stored on the current bucket. 

1497 

1498 :type if_metageneration_match: long 

1499 :param if_metageneration_match: (Optional) Make the operation conditional on whether the 

1500 blob's current metageneration matches the given value. 

1501 

1502 :type if_metageneration_not_match: long 

1503 :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the 

1504 blob's current metageneration does not match the given value. 

1505 

1506 :type timeout: float or tuple 

1507 :param timeout: 

1508 (Optional) The amount of time, in seconds, to wait 

1509 for the server response. See: :ref:`configuring_timeouts` 

1510 

1511 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1512 :param retry: 

1513 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

1514 

1515 :raises: :class:`ValueError` if ``force`` is ``True`` and the bucket 

1516 contains more than 256 objects / blobs. 

1517 """ 

1518 client = self._require_client(client) 

1519 query_params = {} 

1520 

1521 if self.user_project is not None: 

1522 query_params["userProject"] = self.user_project 

1523 

1524 _add_generation_match_parameters( 

1525 query_params, 

1526 if_metageneration_match=if_metageneration_match, 

1527 if_metageneration_not_match=if_metageneration_not_match, 

1528 ) 

1529 if force: 

1530 blobs = list( 

1531 self.list_blobs( 

1532 max_results=self._MAX_OBJECTS_FOR_ITERATION + 1, 

1533 client=client, 

1534 timeout=timeout, 

1535 retry=retry, 

1536 ) 

1537 ) 

1538 if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION: 

1539 message = ( 

1540 "Refusing to delete bucket with more than " 

1541 "%d objects. If you actually want to delete " 

1542 "this bucket, please delete the objects " 

1543 "yourself before calling Bucket.delete()." 

1544 ) % (self._MAX_OBJECTS_FOR_ITERATION,) 

1545 raise ValueError(message) 

1546 

1547 # Ignore 404 errors on delete. 

1548 self.delete_blobs( 

1549 blobs, 

1550 on_error=lambda blob: None, 

1551 client=client, 

1552 timeout=timeout, 

1553 retry=retry, 

1554 ) 

1555 

1556 # We intentionally pass `_target_object=None` since a DELETE 

1557 # request has no response value (whether in a standard request or 

1558 # in a batch request). 

1559 client._delete_resource( 

1560 self.path, 

1561 query_params=query_params, 

1562 timeout=timeout, 

1563 retry=retry, 

1564 _target_object=None, 

1565 ) 

1566 

1567 def delete_blob( 

1568 self, 

1569 blob_name, 

1570 client=None, 

1571 generation=None, 

1572 if_generation_match=None, 

1573 if_generation_not_match=None, 

1574 if_metageneration_match=None, 

1575 if_metageneration_not_match=None, 

1576 timeout=_DEFAULT_TIMEOUT, 

1577 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, 

1578 ): 

1579 """Deletes a blob from the current bucket. 

1580 

1581 If :attr:`user_project` is set, bills the API request to that project. 

1582 

1583 :type blob_name: str 

1584 :param blob_name: A blob name to delete. 

1585 

1586 :type client: :class:`~google.cloud.storage.client.Client` or 

1587 ``NoneType`` 

1588 :param client: (Optional) The client to use. If not passed, falls back 

1589 to the ``client`` stored on the current bucket. 

1590 

1591 :type generation: long 

1592 :param generation: (Optional) If present, permanently deletes a specific 

1593 revision of this object. 

1594 

1595 :type if_generation_match: long 

1596 :param if_generation_match: 

1597 (Optional) See :ref:`using-if-generation-match` 

1598 

1599 :type if_generation_not_match: long 

1600 :param if_generation_not_match: 

1601 (Optional) See :ref:`using-if-generation-not-match` 

1602 

1603 :type if_metageneration_match: long 

1604 :param if_metageneration_match: 

1605 (Optional) See :ref:`using-if-metageneration-match` 

1606 

1607 :type if_metageneration_not_match: long 

1608 :param if_metageneration_not_match: 

1609 (Optional) See :ref:`using-if-metageneration-not-match` 

1610 

1611 :type timeout: float or tuple 

1612 :param timeout: 

1613 (Optional) The amount of time, in seconds, to wait 

1614 for the server response. See: :ref:`configuring_timeouts` 

1615 

1616 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1617 :param retry: 

1618 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

1619 

1620 :raises: :class:`google.cloud.exceptions.NotFound` Raises a NotFound 

1621 if the blob isn't found. To suppress 

1622 the exception, use :meth:`delete_blobs` by passing a no-op 

1623 ``on_error`` callback. 

1624 """ 

1625 client = self._require_client(client) 

1626 blob = Blob(blob_name, bucket=self, generation=generation) 

1627 

1628 query_params = copy.deepcopy(blob._query_params) 

1629 _add_generation_match_parameters( 

1630 query_params, 

1631 if_generation_match=if_generation_match, 

1632 if_generation_not_match=if_generation_not_match, 

1633 if_metageneration_match=if_metageneration_match, 

1634 if_metageneration_not_match=if_metageneration_not_match, 

1635 ) 

1636 # We intentionally pass `_target_object=None` since a DELETE 

1637 # request has no response value (whether in a standard request or 

1638 # in a batch request). 

1639 client._delete_resource( 

1640 blob.path, 

1641 query_params=query_params, 

1642 timeout=timeout, 

1643 retry=retry, 

1644 _target_object=None, 

1645 ) 

1646 

1647 def delete_blobs( 

1648 self, 

1649 blobs, 

1650 on_error=None, 

1651 client=None, 

1652 preserve_generation=False, 

1653 timeout=_DEFAULT_TIMEOUT, 

1654 if_generation_match=None, 

1655 if_generation_not_match=None, 

1656 if_metageneration_match=None, 

1657 if_metageneration_not_match=None, 

1658 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, 

1659 ): 

1660 """Deletes a list of blobs from the current bucket. 

1661 

1662 Uses :meth:`delete_blob` to delete each individual blob. 

1663 

1664 By default, any generation information in the list of blobs is ignored, and the 

1665 live versions of all blobs are deleted. Set `preserve_generation` to True 

1666 if blob generation should instead be propagated from the list of blobs. 

1667 

1668 If :attr:`user_project` is set, bills the API request to that project. 

1669 

1670 :type blobs: list 

1671 :param blobs: A list of :class:`~google.cloud.storage.blob.Blob`-s or 

1672 blob names to delete. 

1673 

1674 :type on_error: callable 

1675 :param on_error: (Optional) Takes single argument: ``blob``. 

1676 Called once for each blob raising 

1677 :class:`~google.cloud.exceptions.NotFound`; 

1678 otherwise, the exception is propagated. 

1679 Note that ``on_error`` is not supported in a ``Batch`` context. 

1680 

1681 :type client: :class:`~google.cloud.storage.client.Client` 

1682 :param client: (Optional) The client to use. If not passed, falls back 

1683 to the ``client`` stored on the current bucket. 

1684 

1685 :type preserve_generation: bool 

1686 :param preserve_generation: (Optional) Deletes only the generation specified on the blob object, 

1687 instead of the live version, if set to True. Only :class:~google.cloud.storage.blob.Blob 

1688 objects can have their generation set in this way. 

1689 Default: False. 

1690 

1691 :type if_generation_match: list of long 

1692 :param if_generation_match: 

1693 (Optional) See :ref:`using-if-generation-match` 

1694 Note that the length of the list must match the length of 

1695 The list must match ``blobs`` item-to-item. 

1696 

1697 :type if_generation_not_match: list of long 

1698 :param if_generation_not_match: 

1699 (Optional) See :ref:`using-if-generation-not-match` 

1700 The list must match ``blobs`` item-to-item. 

1701 

1702 :type if_metageneration_match: list of long 

1703 :param if_metageneration_match: 

1704 (Optional) See :ref:`using-if-metageneration-match` 

1705 The list must match ``blobs`` item-to-item. 

1706 

1707 :type if_metageneration_not_match: list of long 

1708 :param if_metageneration_not_match: 

1709 (Optional) See :ref:`using-if-metageneration-not-match` 

1710 The list must match ``blobs`` item-to-item. 

1711 

1712 :type timeout: float or tuple 

1713 :param timeout: 

1714 (Optional) The amount of time, in seconds, to wait 

1715 for the server response. See: :ref:`configuring_timeouts` 

1716 

1717 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1718 :param retry: 

1719 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

1720 

1721 :raises: :class:`~google.cloud.exceptions.NotFound` (if 

1722 `on_error` is not passed). 

1723 """ 

1724 _raise_if_len_differs( 

1725 len(blobs), 

1726 if_generation_match=if_generation_match, 

1727 if_generation_not_match=if_generation_not_match, 

1728 if_metageneration_match=if_metageneration_match, 

1729 if_metageneration_not_match=if_metageneration_not_match, 

1730 ) 

1731 if_generation_match = iter(if_generation_match or []) 

1732 if_generation_not_match = iter(if_generation_not_match or []) 

1733 if_metageneration_match = iter(if_metageneration_match or []) 

1734 if_metageneration_not_match = iter(if_metageneration_not_match or []) 

1735 

1736 for blob in blobs: 

1737 try: 

1738 blob_name = blob 

1739 generation = None 

1740 if not isinstance(blob_name, str): 

1741 blob_name = blob.name 

1742 generation = blob.generation if preserve_generation else None 

1743 

1744 self.delete_blob( 

1745 blob_name, 

1746 client=client, 

1747 generation=generation, 

1748 if_generation_match=next(if_generation_match, None), 

1749 if_generation_not_match=next(if_generation_not_match, None), 

1750 if_metageneration_match=next(if_metageneration_match, None), 

1751 if_metageneration_not_match=next(if_metageneration_not_match, None), 

1752 timeout=timeout, 

1753 retry=retry, 

1754 ) 

1755 except NotFound: 

1756 if on_error is not None: 

1757 on_error(blob) 

1758 else: 

1759 raise 

1760 

1761 def copy_blob( 

1762 self, 

1763 blob, 

1764 destination_bucket, 

1765 new_name=None, 

1766 client=None, 

1767 preserve_acl=True, 

1768 source_generation=None, 

1769 if_generation_match=None, 

1770 if_generation_not_match=None, 

1771 if_metageneration_match=None, 

1772 if_metageneration_not_match=None, 

1773 if_source_generation_match=None, 

1774 if_source_generation_not_match=None, 

1775 if_source_metageneration_match=None, 

1776 if_source_metageneration_not_match=None, 

1777 timeout=_DEFAULT_TIMEOUT, 

1778 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, 

1779 ): 

1780 """Copy the given blob to the given bucket, optionally with a new name. 

1781 

1782 If :attr:`user_project` is set, bills the API request to that project. 

1783 

1784 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/objects/copy) 

1785 and a [code sample](https://cloud.google.com/storage/docs/samples/storage-copy-file#storage_copy_file-python). 

1786 

1787 :type blob: :class:`google.cloud.storage.blob.Blob` 

1788 :param blob: The blob to be copied. 

1789 

1790 :type destination_bucket: :class:`google.cloud.storage.bucket.Bucket` 

1791 :param destination_bucket: The bucket into which the blob should be 

1792 copied. 

1793 

1794 :type new_name: str 

1795 :param new_name: (Optional) The new name for the copied file. 

1796 

1797 :type client: :class:`~google.cloud.storage.client.Client` or 

1798 ``NoneType`` 

1799 :param client: (Optional) The client to use. If not passed, falls back 

1800 to the ``client`` stored on the current bucket. 

1801 

1802 :type preserve_acl: bool 

1803 :param preserve_acl: DEPRECATED. This argument is not functional! 

1804 (Optional) Copies ACL from old blob to new blob. 

1805 Default: True. 

1806 Note that ``preserve_acl`` is not supported in a 

1807 ``Batch`` context. 

1808 

1809 :type source_generation: long 

1810 :param source_generation: (Optional) The generation of the blob to be 

1811 copied. 

1812 

1813 :type if_generation_match: long 

1814 :param if_generation_match: 

1815 (Optional) See :ref:`using-if-generation-match` 

1816 Note that the generation to be matched is that of the 

1817 ``destination`` blob. 

1818 

1819 :type if_generation_not_match: long 

1820 :param if_generation_not_match: 

1821 (Optional) See :ref:`using-if-generation-not-match` 

1822 Note that the generation to be matched is that of the 

1823 ``destination`` blob. 

1824 

1825 :type if_metageneration_match: long 

1826 :param if_metageneration_match: 

1827 (Optional) See :ref:`using-if-metageneration-match` 

1828 Note that the metageneration to be matched is that of the 

1829 ``destination`` blob. 

1830 

1831 :type if_metageneration_not_match: long 

1832 :param if_metageneration_not_match: 

1833 (Optional) See :ref:`using-if-metageneration-not-match` 

1834 Note that the metageneration to be matched is that of the 

1835 ``destination`` blob. 

1836 

1837 :type if_source_generation_match: long 

1838 :param if_source_generation_match: 

1839 (Optional) Makes the operation conditional on whether the source 

1840 object's generation matches the given value. 

1841 

1842 :type if_source_generation_not_match: long 

1843 :param if_source_generation_not_match: 

1844 (Optional) Makes the operation conditional on whether the source 

1845 object's generation does not match the given value. 

1846 

1847 :type if_source_metageneration_match: long 

1848 :param if_source_metageneration_match: 

1849 (Optional) Makes the operation conditional on whether the source 

1850 object's current metageneration matches the given value. 

1851 

1852 :type if_source_metageneration_not_match: long 

1853 :param if_source_metageneration_not_match: 

1854 (Optional) Makes the operation conditional on whether the source 

1855 object's current metageneration does not match the given value. 

1856 

1857 :type timeout: float or tuple 

1858 :param timeout: 

1859 (Optional) The amount of time, in seconds, to wait 

1860 for the server response. See: :ref:`configuring_timeouts` 

1861 

1862 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

1863 :param retry: 

1864 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

1865 

1866 :rtype: :class:`google.cloud.storage.blob.Blob` 

1867 :returns: The new Blob. 

1868 """ 

1869 client = self._require_client(client) 

1870 query_params = {} 

1871 

1872 if self.user_project is not None: 

1873 query_params["userProject"] = self.user_project 

1874 

1875 if source_generation is not None: 

1876 query_params["sourceGeneration"] = source_generation 

1877 

1878 _add_generation_match_parameters( 

1879 query_params, 

1880 if_generation_match=if_generation_match, 

1881 if_generation_not_match=if_generation_not_match, 

1882 if_metageneration_match=if_metageneration_match, 

1883 if_metageneration_not_match=if_metageneration_not_match, 

1884 if_source_generation_match=if_source_generation_match, 

1885 if_source_generation_not_match=if_source_generation_not_match, 

1886 if_source_metageneration_match=if_source_metageneration_match, 

1887 if_source_metageneration_not_match=if_source_metageneration_not_match, 

1888 ) 

1889 

1890 if new_name is None: 

1891 new_name = blob.name 

1892 

1893 new_blob = Blob(bucket=destination_bucket, name=new_name) 

1894 api_path = blob.path + "/copyTo" + new_blob.path 

1895 copy_result = client._post_resource( 

1896 api_path, 

1897 None, 

1898 query_params=query_params, 

1899 timeout=timeout, 

1900 retry=retry, 

1901 _target_object=new_blob, 

1902 ) 

1903 

1904 if not preserve_acl: 

1905 new_blob.acl.save(acl={}, client=client, timeout=timeout) 

1906 

1907 new_blob._set_properties(copy_result) 

1908 return new_blob 

1909 

1910 def rename_blob( 

1911 self, 

1912 blob, 

1913 new_name, 

1914 client=None, 

1915 if_generation_match=None, 

1916 if_generation_not_match=None, 

1917 if_metageneration_match=None, 

1918 if_metageneration_not_match=None, 

1919 if_source_generation_match=None, 

1920 if_source_generation_not_match=None, 

1921 if_source_metageneration_match=None, 

1922 if_source_metageneration_not_match=None, 

1923 timeout=_DEFAULT_TIMEOUT, 

1924 retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, 

1925 ): 

1926 """Rename the given blob using copy and delete operations. 

1927 

1928 If :attr:`user_project` is set, bills the API request to that project. 

1929 

1930 Effectively, copies blob to the same bucket with a new name, then 

1931 deletes the blob. 

1932 

1933 .. warning:: 

1934 

1935 This method will first duplicate the data and then delete the 

1936 old blob. This means that with very large objects renaming 

1937 could be a very (temporarily) costly or a very slow operation. 

1938 If you need more control over the copy and deletion, instead 

1939 use ``google.cloud.storage.blob.Blob.copy_to`` and 

1940 ``google.cloud.storage.blob.Blob.delete`` directly. 

1941 

1942 Also note that this method is not fully supported in a 

1943 ``Batch`` context. 

1944 

1945 :type blob: :class:`google.cloud.storage.blob.Blob` 

1946 :param blob: The blob to be renamed. 

1947 

1948 :type new_name: str 

1949 :param new_name: The new name for this blob. 

1950 

1951 :type client: :class:`~google.cloud.storage.client.Client` or 

1952 ``NoneType`` 

1953 :param client: (Optional) The client to use. If not passed, falls back 

1954 to the ``client`` stored on the current bucket. 

1955 

1956 :type if_generation_match: long 

1957 :param if_generation_match: 

1958 (Optional) See :ref:`using-if-generation-match` 

1959 Note that the generation to be matched is that of the 

1960 ``destination`` blob. 

1961 

1962 :type if_generation_not_match: long 

1963 :param if_generation_not_match: 

1964 (Optional) See :ref:`using-if-generation-not-match` 

1965 Note that the generation to be matched is that of the 

1966 ``destination`` blob. 

1967 

1968 :type if_metageneration_match: long 

1969 :param if_metageneration_match: 

1970 (Optional) See :ref:`using-if-metageneration-match` 

1971 Note that the metageneration to be matched is that of the 

1972 ``destination`` blob. 

1973 

1974 :type if_metageneration_not_match: long 

1975 :param if_metageneration_not_match: 

1976 (Optional) See :ref:`using-if-metageneration-not-match` 

1977 Note that the metageneration to be matched is that of the 

1978 ``destination`` blob. 

1979 

1980 :type if_source_generation_match: long 

1981 :param if_source_generation_match: 

1982 (Optional) Makes the operation conditional on whether the source 

1983 object's generation matches the given value. Also used in the 

1984 (implied) delete request. 

1985 

1986 :type if_source_generation_not_match: long 

1987 :param if_source_generation_not_match: 

1988 (Optional) Makes the operation conditional on whether the source 

1989 object's generation does not match the given value. Also used in 

1990 the (implied) delete request. 

1991 

1992 :type if_source_metageneration_match: long 

1993 :param if_source_metageneration_match: 

1994 (Optional) Makes the operation conditional on whether the source 

1995 object's current metageneration matches the given value. Also used 

1996 in the (implied) delete request. 

1997 

1998 :type if_source_metageneration_not_match: long 

1999 :param if_source_metageneration_not_match: 

2000 (Optional) Makes the operation conditional on whether the source 

2001 object's current metageneration does not match the given value. 

2002 Also used in the (implied) delete request. 

2003 

2004 :type timeout: float or tuple 

2005 :param timeout: 

2006 (Optional) The amount of time, in seconds, to wait 

2007 for the server response. See: :ref:`configuring_timeouts` 

2008 

2009 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

2010 :param retry: 

2011 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

2012 

2013 :rtype: :class:`Blob` 

2014 :returns: The newly-renamed blob. 

2015 """ 

2016 same_name = blob.name == new_name 

2017 

2018 new_blob = self.copy_blob( 

2019 blob, 

2020 self, 

2021 new_name, 

2022 client=client, 

2023 timeout=timeout, 

2024 if_generation_match=if_generation_match, 

2025 if_generation_not_match=if_generation_not_match, 

2026 if_metageneration_match=if_metageneration_match, 

2027 if_metageneration_not_match=if_metageneration_not_match, 

2028 if_source_generation_match=if_source_generation_match, 

2029 if_source_generation_not_match=if_source_generation_not_match, 

2030 if_source_metageneration_match=if_source_metageneration_match, 

2031 if_source_metageneration_not_match=if_source_metageneration_not_match, 

2032 retry=retry, 

2033 ) 

2034 

2035 if not same_name: 

2036 blob.delete( 

2037 client=client, 

2038 timeout=timeout, 

2039 if_generation_match=if_source_generation_match, 

2040 if_generation_not_match=if_source_generation_not_match, 

2041 if_metageneration_match=if_source_metageneration_match, 

2042 if_metageneration_not_match=if_source_metageneration_not_match, 

2043 retry=retry, 

2044 ) 

2045 return new_blob 

2046 

2047 @property 

2048 def cors(self): 

2049 """Retrieve or set CORS policies configured for this bucket. 

2050 

2051 See http://www.w3.org/TR/cors/ and 

2052 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2053 

2054 .. note:: 

2055 

2056 The getter for this property returns a list which contains 

2057 *copies* of the bucket's CORS policy mappings. Mutating the list 

2058 or one of its dicts has no effect unless you then re-assign the 

2059 dict via the setter. E.g.: 

2060 

2061 >>> policies = bucket.cors 

2062 >>> policies.append({'origin': '/foo', ...}) 

2063 >>> policies[1]['maxAgeSeconds'] = 3600 

2064 >>> del policies[0] 

2065 >>> bucket.cors = policies 

2066 >>> bucket.update() 

2067 

2068 :setter: Set CORS policies for this bucket. 

2069 :getter: Gets the CORS policies for this bucket. 

2070 

2071 :rtype: list of dictionaries 

2072 :returns: A sequence of mappings describing each CORS policy. 

2073 """ 

2074 return [copy.deepcopy(policy) for policy in self._properties.get("cors", ())] 

2075 

2076 @cors.setter 

2077 def cors(self, entries): 

2078 """Set CORS policies configured for this bucket. 

2079 

2080 See http://www.w3.org/TR/cors/ and 

2081 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2082 

2083 :type entries: list of dictionaries 

2084 :param entries: A sequence of mappings describing each CORS policy. 

2085 """ 

2086 self._patch_property("cors", entries) 

2087 

    default_event_based_hold = _scalar_property("defaultEventBasedHold")
    """Are uploaded objects automatically placed under an event-based hold?

    If True, uploaded objects will be placed under an event-based hold to
    be released at a future time. When released an object will then begin
    the retention period determined by the policy retention period for the
    object bucket.

    See https://cloud.google.com/storage/docs/json_api/v1/buckets

    If the property is not set locally, returns ``None``.

    :rtype: bool or ``NoneType``
    """

2102 

2103 @property 

2104 def default_kms_key_name(self): 

2105 """Retrieve / set default KMS encryption key for objects in the bucket. 

2106 

2107 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2108 

2109 :setter: Set default KMS encryption key for items in this bucket. 

2110 :getter: Get default KMS encryption key for items in this bucket. 

2111 

2112 :rtype: str 

2113 :returns: Default KMS encryption key, or ``None`` if not set. 

2114 """ 

2115 encryption_config = self._properties.get("encryption", {}) 

2116 return encryption_config.get("defaultKmsKeyName") 

2117 

2118 @default_kms_key_name.setter 

2119 def default_kms_key_name(self, value): 

2120 """Set default KMS encryption key for objects in the bucket. 

2121 

2122 :type value: str or None 

2123 :param value: new KMS key name (None to clear any existing key). 

2124 """ 

2125 encryption_config = self._properties.get("encryption", {}) 

2126 encryption_config["defaultKmsKeyName"] = value 

2127 self._patch_property("encryption", encryption_config) 

2128 

2129 @property 

2130 def labels(self): 

2131 """Retrieve or set labels assigned to this bucket. 

2132 

2133 See 

2134 https://cloud.google.com/storage/docs/json_api/v1/buckets#labels 

2135 

2136 .. note:: 

2137 

2138 The getter for this property returns a dict which is a *copy* 

2139 of the bucket's labels. Mutating that dict has no effect unless 

2140 you then re-assign the dict via the setter. E.g.: 

2141 

2142 >>> labels = bucket.labels 

2143 >>> labels['new_key'] = 'some-label' 

2144 >>> del labels['old_key'] 

2145 >>> bucket.labels = labels 

2146 >>> bucket.update() 

2147 

2148 :setter: Set labels for this bucket. 

2149 :getter: Gets the labels for this bucket. 

2150 

2151 :rtype: :class:`dict` 

2152 :returns: Name-value pairs (string->string) labelling the bucket. 

2153 """ 

2154 labels = self._properties.get("labels") 

2155 if labels is None: 

2156 return {} 

2157 return copy.deepcopy(labels) 

2158 

2159 @labels.setter 

2160 def labels(self, mapping): 

2161 """Set labels assigned to this bucket. 

2162 

2163 See 

2164 https://cloud.google.com/storage/docs/json_api/v1/buckets#labels 

2165 

2166 :type mapping: :class:`dict` 

2167 :param mapping: Name-value pairs (string->string) labelling the bucket. 

2168 """ 

2169 # If any labels have been expressly removed, we need to track this 

2170 # so that a future .patch() call can do the correct thing. 

2171 existing = set([k for k in self.labels.keys()]) 

2172 incoming = set([k for k in mapping.keys()]) 

2173 self._label_removals = self._label_removals.union(existing.difference(incoming)) 

2174 mapping = {k: str(v) for k, v in mapping.items()} 

2175 

2176 # Actually update the labels on the object. 

2177 self._patch_property("labels", copy.deepcopy(mapping)) 

2178 

2179 @property 

2180 def etag(self): 

2181 """Retrieve the ETag for the bucket. 

2182 

2183 See https://tools.ietf.org/html/rfc2616#section-3.11 and 

2184 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2185 

2186 :rtype: str or ``NoneType`` 

2187 :returns: The bucket etag or ``None`` if the bucket's 

2188 resource has not been loaded from the server. 

2189 """ 

2190 return self._properties.get("etag") 

2191 

2192 @property 

2193 def id(self): 

2194 """Retrieve the ID for the bucket. 

2195 

2196 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2197 

2198 :rtype: str or ``NoneType`` 

2199 :returns: The ID of the bucket or ``None`` if the bucket's 

2200 resource has not been loaded from the server. 

2201 """ 

2202 return self._properties.get("id") 

2203 

2204 @property 

2205 def iam_configuration(self): 

2206 """Retrieve IAM configuration for this bucket. 

2207 

2208 :rtype: :class:`IAMConfiguration` 

2209 :returns: an instance for managing the bucket's IAM configuration. 

2210 """ 

2211 info = self._properties.get("iamConfiguration", {}) 

2212 return IAMConfiguration.from_api_repr(info, self) 

2213 

2214 @property 

2215 def lifecycle_rules(self): 

2216 """Retrieve or set lifecycle rules configured for this bucket. 

2217 

2218 See https://cloud.google.com/storage/docs/lifecycle and 

2219 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2220 

2221 .. note:: 

2222 

2223 The getter for this property returns a generator which yields 

2224 *copies* of the bucket's lifecycle rules mappings. Mutating the 

2225 output dicts has no effect unless you then re-assign the dict via 

2226 the setter. E.g.: 

2227 

2228 >>> rules = list(bucket.lifecycle_rules) 

2229 >>> rules.append({'origin': '/foo', ...}) 

2230 >>> rules[1]['rule']['action']['type'] = 'Delete' 

2231 >>> del rules[0] 

2232 >>> bucket.lifecycle_rules = rules 

2233 >>> bucket.update() 

2234 

2235 :setter: Set lifecycle rules for this bucket. 

2236 :getter: Gets the lifecycle rules for this bucket. 

2237 

2238 :rtype: generator(dict) 

2239 :returns: A sequence of mappings describing each lifecycle rule. 

2240 """ 

2241 info = self._properties.get("lifecycle", {}) 

2242 for rule in info.get("rule", ()): 

2243 action_type = rule["action"]["type"] 

2244 if action_type == "Delete": 

2245 yield LifecycleRuleDelete.from_api_repr(rule) 

2246 elif action_type == "SetStorageClass": 

2247 yield LifecycleRuleSetStorageClass.from_api_repr(rule) 

2248 elif action_type == "AbortIncompleteMultipartUpload": 

2249 yield LifecycleRuleAbortIncompleteMultipartUpload.from_api_repr(rule) 

2250 else: 

2251 warnings.warn( 

2252 "Unknown lifecycle rule type received: {}. Please upgrade to the latest version of google-cloud-storage.".format( 

2253 rule 

2254 ), 

2255 UserWarning, 

2256 stacklevel=1, 

2257 ) 

2258 

2259 @lifecycle_rules.setter 

2260 def lifecycle_rules(self, rules): 

2261 """Set lifecycle rules configured for this bucket. 

2262 

2263 See https://cloud.google.com/storage/docs/lifecycle and 

2264 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2265 

2266 :type rules: list of dictionaries 

2267 :param rules: A sequence of mappings describing each lifecycle rule. 

2268 """ 

2269 rules = [dict(rule) for rule in rules] # Convert helpers if needed 

2270 self._patch_property("lifecycle", {"rule": rules}) 

2271 

2272 def clear_lifecyle_rules(self): 

2273 """Clear lifecycle rules configured for this bucket. 

2274 

2275 See https://cloud.google.com/storage/docs/lifecycle and 

2276 https://cloud.google.com/storage/docs/json_api/v1/buckets 

2277 """ 

2278 self.lifecycle_rules = [] 

2279 

2280 def add_lifecycle_delete_rule(self, **kw): 

2281 """Add a "delete" rule to lifecycle rules configured for this bucket. 

2282 

2283 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle), 

2284 which is set on the bucket. For the general format of a lifecycle configuration, see the 

2285 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets). 

2286 See also a [code sample](https://cloud.google.com/storage/docs/samples/storage-enable-bucket-lifecycle-management#storage_enable_bucket_lifecycle_management-python). 

2287 

2288 :type kw: dict 

2289 :params kw: arguments passed to :class:`LifecycleRuleConditions`. 

2290 """ 

2291 rules = list(self.lifecycle_rules) 

2292 rules.append(LifecycleRuleDelete(**kw)) 

2293 self.lifecycle_rules = rules 

2294 

2295 def add_lifecycle_set_storage_class_rule(self, storage_class, **kw): 

2296 """Add a "set storage class" rule to lifecycle rules. 

2297 

2298 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle), 

2299 which is set on the bucket. For the general format of a lifecycle configuration, see the 

2300 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets). 

2301 

2302 :type storage_class: str, one of :attr:`STORAGE_CLASSES`. 

2303 :param storage_class: new storage class to assign to matching items. 

2304 

2305 :type kw: dict 

2306 :params kw: arguments passed to :class:`LifecycleRuleConditions`. 

2307 """ 

2308 rules = list(self.lifecycle_rules) 

2309 rules.append(LifecycleRuleSetStorageClass(storage_class, **kw)) 

2310 self.lifecycle_rules = rules 

2311 

2312 def add_lifecycle_abort_incomplete_multipart_upload_rule(self, **kw): 

2313 """Add a "abort incomplete multipart upload" rule to lifecycle rules. 

2314 

2315 .. note:: 

2316 The "age" lifecycle condition is the only supported condition 

2317 for this rule. 

2318 

2319 This defines a [lifecycle configuration](https://cloud.google.com/storage/docs/lifecycle), 

2320 which is set on the bucket. For the general format of a lifecycle configuration, see the 

2321 [bucket resource representation for JSON](https://cloud.google.com/storage/docs/json_api/v1/buckets). 

2322 

2323 :type kw: dict 

2324 :params kw: arguments passed to :class:`LifecycleRuleConditions`. 

2325 """ 

2326 rules = list(self.lifecycle_rules) 

2327 rules.append(LifecycleRuleAbortIncompleteMultipartUpload(**kw)) 

2328 self.lifecycle_rules = rules 

2329 

2330 _location = _scalar_property("location") 

2331 

2332 @property 

2333 def location(self): 

2334 """Retrieve location configured for this bucket. 

2335 

2336 See https://cloud.google.com/storage/docs/json_api/v1/buckets and 

2337 https://cloud.google.com/storage/docs/locations 

2338 

2339 Returns ``None`` if the property has not been set before creation, 

2340 or if the bucket's resource has not been loaded from the server. 

2341 :rtype: str or ``NoneType`` 

2342 """ 

2343 return self._location 

2344 

2345 @location.setter 

2346 def location(self, value): 

2347 """(Deprecated) Set `Bucket.location` 

2348 

2349 This can only be set at bucket **creation** time. 

2350 

2351 See https://cloud.google.com/storage/docs/json_api/v1/buckets and 

2352 https://cloud.google.com/storage/docs/bucket-locations 

2353 

2354 .. warning:: 

2355 

2356 Assignment to 'Bucket.location' is deprecated, as it is only 

2357 valid before the bucket is created. Instead, pass the location 

2358 to `Bucket.create`. 

2359 """ 

2360 warnings.warn(_LOCATION_SETTER_MESSAGE, DeprecationWarning, stacklevel=2) 

2361 self._location = value 

2362 

2363 @property 

2364 def data_locations(self): 

2365 """Retrieve the list of regional locations for custom dual-region buckets. 

2366 

2367 See https://cloud.google.com/storage/docs/json_api/v1/buckets and 

2368 https://cloud.google.com/storage/docs/locations 

2369 

2370 Returns ``None`` if the property has not been set before creation, 

2371 if the bucket's resource has not been loaded from the server, 

2372 or if the bucket is not a dual-regions bucket. 

2373 :rtype: list of str or ``NoneType`` 

2374 """ 

2375 custom_placement_config = self._properties.get("customPlacementConfig", {}) 

2376 return custom_placement_config.get("dataLocations") 

2377 

2378 @property 

2379 def location_type(self): 

2380 """Retrieve the location type for the bucket. 

2381 

2382 See https://cloud.google.com/storage/docs/storage-classes 

2383 

2384 :getter: Gets the the location type for this bucket. 

2385 

2386 :rtype: str or ``NoneType`` 

2387 :returns: 

2388 If set, one of 

2389 :attr:`~google.cloud.storage.constants.MULTI_REGION_LOCATION_TYPE`, 

2390 :attr:`~google.cloud.storage.constants.REGION_LOCATION_TYPE`, or 

2391 :attr:`~google.cloud.storage.constants.DUAL_REGION_LOCATION_TYPE`, 

2392 else ``None``. 

2393 """ 

2394 return self._properties.get("locationType") 

2395 

2396 def get_logging(self): 

2397 """Return info about access logging for this bucket. 

2398 

2399 See https://cloud.google.com/storage/docs/access-logs#status 

2400 

2401 :rtype: dict or None 

2402 :returns: a dict w/ keys, ``logBucket`` and ``logObjectPrefix`` 

2403 (if logging is enabled), or None (if not). 

2404 """ 

2405 info = self._properties.get("logging") 

2406 return copy.deepcopy(info) 

2407 

2408 def enable_logging(self, bucket_name, object_prefix=""): 

2409 """Enable access logging for this bucket. 

2410 

2411 See https://cloud.google.com/storage/docs/access-logs 

2412 

2413 :type bucket_name: str 

2414 :param bucket_name: name of bucket in which to store access logs 

2415 

2416 :type object_prefix: str 

2417 :param object_prefix: prefix for access log filenames 

2418 """ 

2419 info = {"logBucket": bucket_name, "logObjectPrefix": object_prefix} 

2420 self._patch_property("logging", info) 

2421 

2422 def disable_logging(self): 

2423 """Disable access logging for this bucket. 

2424 

2425 See https://cloud.google.com/storage/docs/access-logs#disabling 

2426 """ 

2427 self._patch_property("logging", None) 

2428 

2429 @property 

2430 def metageneration(self): 

2431 """Retrieve the metageneration for the bucket. 

2432 

2433 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2434 

2435 :rtype: int or ``NoneType`` 

2436 :returns: The metageneration of the bucket or ``None`` if the bucket's 

2437 resource has not been loaded from the server. 

2438 """ 

2439 metageneration = self._properties.get("metageneration") 

2440 if metageneration is not None: 

2441 return int(metageneration) 

2442 

2443 @property 

2444 def owner(self): 

2445 """Retrieve info about the owner of the bucket. 

2446 

2447 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2448 

2449 :rtype: dict or ``NoneType`` 

2450 :returns: Mapping of owner's role/ID. Returns ``None`` if the bucket's 

2451 resource has not been loaded from the server. 

2452 """ 

2453 return copy.deepcopy(self._properties.get("owner")) 

2454 

2455 @property 

2456 def project_number(self): 

2457 """Retrieve the number of the project to which the bucket is assigned. 

2458 

2459 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2460 

2461 :rtype: int or ``NoneType`` 

2462 :returns: The project number that owns the bucket or ``None`` if 

2463 the bucket's resource has not been loaded from the server. 

2464 """ 

2465 project_number = self._properties.get("projectNumber") 

2466 if project_number is not None: 

2467 return int(project_number) 

2468 

2469 @property 

2470 def retention_policy_effective_time(self): 

2471 """Retrieve the effective time of the bucket's retention policy. 

2472 

2473 :rtype: datetime.datetime or ``NoneType`` 

2474 :returns: point-in time at which the bucket's retention policy is 

2475 effective, or ``None`` if the property is not 

2476 set locally. 

2477 """ 

2478 policy = self._properties.get("retentionPolicy") 

2479 if policy is not None: 

2480 timestamp = policy.get("effectiveTime") 

2481 if timestamp is not None: 

2482 return _rfc3339_nanos_to_datetime(timestamp) 

2483 

2484 @property 

2485 def retention_policy_locked(self): 

2486 """Retrieve whthere the bucket's retention policy is locked. 

2487 

2488 :rtype: bool 

2489 :returns: True if the bucket's policy is locked, or else False 

2490 if the policy is not locked, or the property is not 

2491 set locally. 

2492 """ 

2493 policy = self._properties.get("retentionPolicy") 

2494 if policy is not None: 

2495 return policy.get("isLocked") 

2496 

2497 @property 

2498 def retention_period(self): 

2499 """Retrieve or set the retention period for items in the bucket. 

2500 

2501 :rtype: int or ``NoneType`` 

2502 :returns: number of seconds to retain items after upload or release 

2503 from event-based lock, or ``None`` if the property is not 

2504 set locally. 

2505 """ 

2506 policy = self._properties.get("retentionPolicy") 

2507 if policy is not None: 

2508 period = policy.get("retentionPeriod") 

2509 if period is not None: 

2510 return int(period) 

2511 

2512 @retention_period.setter 

2513 def retention_period(self, value): 

2514 """Set the retention period for items in the bucket. 

2515 

2516 :type value: int 

2517 :param value: 

2518 number of seconds to retain items after upload or release from 

2519 event-based lock. 

2520 

2521 :raises ValueError: if the bucket's retention policy is locked. 

2522 """ 

2523 policy = self._properties.setdefault("retentionPolicy", {}) 

2524 if value is not None: 

2525 policy["retentionPeriod"] = str(value) 

2526 else: 

2527 policy = None 

2528 self._patch_property("retentionPolicy", policy) 

2529 

2530 @property 

2531 def self_link(self): 

2532 """Retrieve the URI for the bucket. 

2533 

2534 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2535 

2536 :rtype: str or ``NoneType`` 

2537 :returns: The self link for the bucket or ``None`` if 

2538 the bucket's resource has not been loaded from the server. 

2539 """ 

2540 return self._properties.get("selfLink") 

2541 

2542 @property 

2543 def storage_class(self): 

2544 """Retrieve or set the storage class for the bucket. 

2545 

2546 See https://cloud.google.com/storage/docs/storage-classes 

2547 

2548 :setter: Set the storage class for this bucket. 

2549 :getter: Gets the the storage class for this bucket. 

2550 

2551 :rtype: str or ``NoneType`` 

2552 :returns: 

2553 If set, one of 

2554 :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`, 

2555 :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`, 

2556 :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`, 

2557 :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`, 

2558 :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`, 

2559 :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`, 

2560 or 

2561 :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`, 

2562 else ``None``. 

2563 """ 

2564 return self._properties.get("storageClass") 

2565 

2566 @storage_class.setter 

2567 def storage_class(self, value): 

2568 """Set the storage class for the bucket. 

2569 

2570 See https://cloud.google.com/storage/docs/storage-classes 

2571 

2572 :type value: str 

2573 :param value: 

2574 One of 

2575 :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`, 

2576 :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`, 

2577 :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`, 

2578 :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`, 

2579 :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`, 

2580 :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`, 

2581 or 

2582 :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`, 

2583 """ 

2584 self._patch_property("storageClass", value) 

2585 

2586 @property 

2587 def time_created(self): 

2588 """Retrieve the timestamp at which the bucket was created. 

2589 

2590 See https://cloud.google.com/storage/docs/json_api/v1/buckets 

2591 

2592 :rtype: :class:`datetime.datetime` or ``NoneType`` 

2593 :returns: Datetime object parsed from RFC3339 valid timestamp, or 

2594 ``None`` if the bucket's resource has not been loaded 

2595 from the server. 

2596 """ 

2597 value = self._properties.get("timeCreated") 

2598 if value is not None: 

2599 return _rfc3339_nanos_to_datetime(value) 

2600 

2601 @property 

2602 def versioning_enabled(self): 

2603 """Is versioning enabled for this bucket? 

2604 

2605 See https://cloud.google.com/storage/docs/object-versioning for 

2606 details. 

2607 

2608 :setter: Update whether versioning is enabled for this bucket. 

2609 :getter: Query whether versioning is enabled for this bucket. 

2610 

2611 :rtype: bool 

2612 :returns: True if enabled, else False. 

2613 """ 

2614 versioning = self._properties.get("versioning", {}) 

2615 return versioning.get("enabled", False) 

2616 

2617 @versioning_enabled.setter 

2618 def versioning_enabled(self, value): 

2619 """Enable versioning for this bucket. 

2620 

2621 See https://cloud.google.com/storage/docs/object-versioning for 

2622 details. 

2623 

2624 :type value: convertible to boolean 

2625 :param value: should versioning be enabled for the bucket? 

2626 """ 

2627 self._patch_property("versioning", {"enabled": bool(value)}) 

2628 

2629 @property 

2630 def requester_pays(self): 

2631 """Does the requester pay for API requests for this bucket? 

2632 

2633 See https://cloud.google.com/storage/docs/requester-pays for 

2634 details. 

2635 

2636 :setter: Update whether requester pays for this bucket. 

2637 :getter: Query whether requester pays for this bucket. 

2638 

2639 :rtype: bool 

2640 :returns: True if requester pays for API requests for the bucket, 

2641 else False. 

2642 """ 

2643 versioning = self._properties.get("billing", {}) 

2644 return versioning.get("requesterPays", False) 

2645 

2646 @requester_pays.setter 

2647 def requester_pays(self, value): 

2648 """Update whether requester pays for API requests for this bucket. 

2649 

2650 See https://cloud.google.com/storage/docs/using-requester-pays for 

2651 details. 

2652 

2653 :type value: convertible to boolean 

2654 :param value: should requester pay for API requests for the bucket? 

2655 """ 

2656 self._patch_property("billing", {"requesterPays": bool(value)}) 

2657 

2658 @property 

2659 def autoclass_enabled(self): 

2660 """Whether Autoclass is enabled for this bucket. 

2661 

2662 See https://cloud.google.com/storage/docs/using-autoclass for details. 

2663 

2664 :setter: Update whether autoclass is enabled for this bucket. 

2665 :getter: Query whether autoclass is enabled for this bucket. 

2666 

2667 :rtype: bool 

2668 :returns: True if enabled, else False. 

2669 """ 

2670 autoclass = self._properties.get("autoclass", {}) 

2671 return autoclass.get("enabled", False) 

2672 

2673 @autoclass_enabled.setter 

2674 def autoclass_enabled(self, value): 

2675 """Enable or disable Autoclass at the bucket-level. 

2676 

2677 See https://cloud.google.com/storage/docs/using-autoclass for details. 

2678 

2679 :type value: convertible to boolean 

2680 :param value: If true, enable Autoclass for this bucket. 

2681 If false, disable Autoclass for this bucket. 

2682 

2683 .. note:: 

2684 To enable autoclass, you must set it at bucket creation time. 

2685 Currently, only patch requests that disable autoclass are supported. 

2686 

2687 """ 

2688 self._patch_property("autoclass", {"enabled": bool(value)}) 

2689 

2690 @property 

2691 def autoclass_toggle_time(self): 

2692 """Retrieve the toggle time when Autoclaass was last enabled or disabled for the bucket. 

2693 :rtype: datetime.datetime or ``NoneType`` 

2694 :returns: point-in time at which the bucket's autoclass is toggled, or ``None`` if the property is not set locally. 

2695 """ 

2696 autoclass = self._properties.get("autoclass") 

2697 if autoclass is not None: 

2698 timestamp = autoclass.get("toggleTime") 

2699 if timestamp is not None: 

2700 return _rfc3339_nanos_to_datetime(timestamp) 

2701 

2702 def configure_website(self, main_page_suffix=None, not_found_page=None): 

2703 """Configure website-related properties. 

2704 

2705 See https://cloud.google.com/storage/docs/static-website 

2706 

2707 .. note:: 

2708 This configures the bucket's website-related properties,controlling how 

2709 the service behaves when accessing bucket contents as a web site. 

2710 See [tutorials](https://cloud.google.com/storage/docs/hosting-static-website) and 

2711 [code samples](https://cloud.google.com/storage/docs/samples/storage-define-bucket-website-configuration#storage_define_bucket_website_configuration-python) 

2712 for more information. 

2713 

2714 :type main_page_suffix: str 

2715 :param main_page_suffix: The page to use as the main page 

2716 of a directory. 

2717 Typically something like index.html. 

2718 

2719 :type not_found_page: str 

2720 :param not_found_page: The file to use when a page isn't found. 

2721 """ 

2722 data = {"mainPageSuffix": main_page_suffix, "notFoundPage": not_found_page} 

2723 self._patch_property("website", data) 

2724 

2725 def disable_website(self): 

2726 """Disable the website configuration for this bucket. 

2727 

2728 This is really just a shortcut for setting the website-related 

2729 attributes to ``None``. 

2730 """ 

2731 return self.configure_website(None, None) 

2732 

2733 def get_iam_policy( 

2734 self, 

2735 client=None, 

2736 requested_policy_version=None, 

2737 timeout=_DEFAULT_TIMEOUT, 

2738 retry=DEFAULT_RETRY, 

2739 ): 

2740 """Retrieve the IAM policy for the bucket. 

2741 

2742 See [API reference docs](https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy) 

2743 and a [code sample](https://cloud.google.com/storage/docs/samples/storage-view-bucket-iam-members#storage_view_bucket_iam_members-python). 

2744 

2745 If :attr:`user_project` is set, bills the API request to that project. 

2746 

2747 :type client: :class:`~google.cloud.storage.client.Client` or 

2748 ``NoneType`` 

2749 :param client: (Optional) The client to use. If not passed, falls back 

2750 to the ``client`` stored on the current bucket. 

2751 

2752 :type requested_policy_version: int or ``NoneType`` 

2753 :param requested_policy_version: (Optional) The version of IAM policies to request. 

2754 If a policy with a condition is requested without 

2755 setting this, the server will return an error. 

2756 This must be set to a value of 3 to retrieve IAM 

2757 policies containing conditions. This is to prevent 

2758 client code that isn't aware of IAM conditions from 

2759 interpreting and modifying policies incorrectly. 

2760 The service might return a policy with version lower 

2761 than the one that was requested, based on the 

2762 feature syntax in the policy fetched. 

2763 

2764 :type timeout: float or tuple 

2765 :param timeout: 

2766 (Optional) The amount of time, in seconds, to wait 

2767 for the server response. See: :ref:`configuring_timeouts` 

2768 

2769 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

2770 :param retry: 

2771 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

2772 

2773 :rtype: :class:`google.api_core.iam.Policy` 

2774 :returns: the policy instance, based on the resource returned from 

2775 the ``getIamPolicy`` API request. 

2776 """ 

2777 client = self._require_client(client) 

2778 query_params = {} 

2779 

2780 if self.user_project is not None: 

2781 query_params["userProject"] = self.user_project 

2782 

2783 if requested_policy_version is not None: 

2784 query_params["optionsRequestedPolicyVersion"] = requested_policy_version 

2785 

2786 info = client._get_resource( 

2787 f"{self.path}/iam", 

2788 query_params=query_params, 

2789 timeout=timeout, 

2790 retry=retry, 

2791 _target_object=None, 

2792 ) 

2793 return Policy.from_api_repr(info) 

2794 

2795 def set_iam_policy( 

2796 self, 

2797 policy, 

2798 client=None, 

2799 timeout=_DEFAULT_TIMEOUT, 

2800 retry=DEFAULT_RETRY_IF_ETAG_IN_JSON, 

2801 ): 

2802 """Update the IAM policy for the bucket. 

2803 

2804 See 

2805 https://cloud.google.com/storage/docs/json_api/v1/buckets/setIamPolicy 

2806 

2807 If :attr:`user_project` is set, bills the API request to that project. 

2808 

2809 :type policy: :class:`google.api_core.iam.Policy` 

2810 :param policy: policy instance used to update bucket's IAM policy. 

2811 

2812 :type client: :class:`~google.cloud.storage.client.Client` or 

2813 ``NoneType`` 

2814 :param client: (Optional) The client to use. If not passed, falls back 

2815 to the ``client`` stored on the current bucket. 

2816 

2817 :type timeout: float or tuple 

2818 :param timeout: 

2819 (Optional) The amount of time, in seconds, to wait 

2820 for the server response. See: :ref:`configuring_timeouts` 

2821 

2822 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

2823 :param retry: 

2824 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

2825 

2826 :rtype: :class:`google.api_core.iam.Policy` 

2827 :returns: the policy instance, based on the resource returned from 

2828 the ``setIamPolicy`` API request. 

2829 """ 

2830 client = self._require_client(client) 

2831 query_params = {} 

2832 

2833 if self.user_project is not None: 

2834 query_params["userProject"] = self.user_project 

2835 

2836 path = f"{self.path}/iam" 

2837 resource = policy.to_api_repr() 

2838 resource["resourceId"] = self.path 

2839 

2840 info = client._put_resource( 

2841 path, 

2842 resource, 

2843 query_params=query_params, 

2844 timeout=timeout, 

2845 retry=retry, 

2846 _target_object=None, 

2847 ) 

2848 

2849 return Policy.from_api_repr(info) 

2850 

2851 def test_iam_permissions( 

2852 self, permissions, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY 

2853 ): 

2854 """API call: test permissions 

2855 

2856 See 

2857 https://cloud.google.com/storage/docs/json_api/v1/buckets/testIamPermissions 

2858 

2859 If :attr:`user_project` is set, bills the API request to that project. 

2860 

2861 :type permissions: list of string 

2862 :param permissions: the permissions to check 

2863 

2864 :type client: :class:`~google.cloud.storage.client.Client` or 

2865 ``NoneType`` 

2866 :param client: (Optional) The client to use. If not passed, falls back 

2867 to the ``client`` stored on the current bucket. 

2868 

2869 :type timeout: float or tuple 

2870 :param timeout: 

2871 (Optional) The amount of time, in seconds, to wait 

2872 for the server response. See: :ref:`configuring_timeouts` 

2873 

2874 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

2875 :param retry: 

2876 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

2877 

2878 :rtype: list of string 

2879 :returns: the permissions returned by the ``testIamPermissions`` API 

2880 request. 

2881 """ 

2882 client = self._require_client(client) 

2883 query_params = {"permissions": permissions} 

2884 

2885 if self.user_project is not None: 

2886 query_params["userProject"] = self.user_project 

2887 

2888 path = f"{self.path}/iam/testPermissions" 

2889 resp = client._get_resource( 

2890 path, 

2891 query_params=query_params, 

2892 timeout=timeout, 

2893 retry=retry, 

2894 _target_object=None, 

2895 ) 

2896 return resp.get("permissions", []) 

2897 

    def make_public(
        self,
        recursive=False,
        future=False,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
    ):
        """Update bucket's ACL, granting read access to anonymous users.

        :type recursive: bool
        :param recursive: If True, this will make all blobs inside the bucket
                          public as well.

        :type future: bool
        :param future: If True, this will make all objects created in the
                       future public as well.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.
        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :raises ValueError:
            If ``recursive`` is True, and the bucket contains more than 256
            blobs. This is to prevent extremely long runtime of this
            method. For such buckets, iterate over the blobs returned by
            :meth:`list_blobs` and call
            :meth:`~google.cloud.storage.blob.Blob.make_public`
            for each blob.
        """
        # Grant READ to allUsers on the bucket's own ACL and persist it,
        # honoring the metageneration preconditions if supplied.
        self.acl.all().grant_read()
        self.acl.save(
            client=client,
            timeout=timeout,
            if_metageneration_match=if_metageneration_match,
            if_metageneration_not_match=if_metageneration_not_match,
            retry=retry,
        )

        if future:
            # Also grant READ to allUsers on the default object ACL so that
            # objects created later are public as well.
            doa = self.default_object_acl
            if not doa.loaded:
                doa.reload(client=client, timeout=timeout)
            doa.all().grant_read()
            doa.save(
                client=client,
                timeout=timeout,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                retry=retry,
            )

        if recursive:
            # Fetch one more blob than the iteration cap so we can detect
            # "too many objects" without listing the entire bucket.
            blobs = list(
                self.list_blobs(
                    projection="full",
                    max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
                    client=client,
                    timeout=timeout,
                )
            )
            if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
                message = (
                    "Refusing to make public recursively with more than "
                    "%d objects. If you actually want to make every object "
                    "in this bucket public, iterate through the blobs "
                    "returned by 'Bucket.list_blobs()' and call "
                    "'make_public' on each one."
                ) % (self._MAX_OBJECTS_FOR_ITERATION,)
                raise ValueError(message)

            # One ACL save per blob; the metageneration preconditions above
            # apply only to the bucket-level saves, not the per-blob ones.
            for blob in blobs:
                blob.acl.all().grant_read()
                blob.acl.save(
                    client=client,
                    timeout=timeout,
                )

2994 

    def make_private(
        self,
        recursive=False,
        future=False,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
    ):
        """Update bucket's ACL, revoking read access for anonymous users.

        :type recursive: bool
        :param recursive: If True, this will make all blobs inside the bucket
                          private as well.

        :type future: bool
        :param future: If True, this will make all objects created in the
                       future private as well.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use. If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response. See: :ref:`configuring_timeouts`

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        bucket's current metageneration matches the given value.
        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            bucket's current metageneration does not match the given value.
        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :raises ValueError:
            If ``recursive`` is True, and the bucket contains more than 256
            blobs. This is to prevent extremely long runtime of this
            method. For such buckets, iterate over the blobs returned by
            :meth:`list_blobs` and call
            :meth:`~google.cloud.storage.blob.Blob.make_private`
            for each blob.
        """
        # Revoke READ from allUsers on the bucket's own ACL and persist it,
        # honoring the metageneration preconditions if supplied.
        self.acl.all().revoke_read()
        self.acl.save(
            client=client,
            timeout=timeout,
            if_metageneration_match=if_metageneration_match,
            if_metageneration_not_match=if_metageneration_not_match,
            retry=retry,
        )

        if future:
            # Also revoke READ from allUsers on the default object ACL so
            # that objects created later are private as well.
            doa = self.default_object_acl
            if not doa.loaded:
                doa.reload(client=client, timeout=timeout)
            doa.all().revoke_read()
            doa.save(
                client=client,
                timeout=timeout,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                retry=retry,
            )

        if recursive:
            # Fetch one more blob than the iteration cap so we can detect
            # "too many objects" without listing the entire bucket.
            blobs = list(
                self.list_blobs(
                    projection="full",
                    max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
                    client=client,
                    timeout=timeout,
                )
            )
            if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
                message = (
                    "Refusing to make private recursively with more than "
                    "%d objects. If you actually want to make every object "
                    "in this bucket private, iterate through the blobs "
                    "returned by 'Bucket.list_blobs()' and call "
                    "'make_private' on each one."
                ) % (self._MAX_OBJECTS_FOR_ITERATION,)
                raise ValueError(message)

            # One ACL save per blob; the metageneration preconditions above
            # apply only to the bucket-level saves, not the per-blob ones.
            for blob in blobs:
                blob.acl.all().revoke_read()
                blob.acl.save(client=client, timeout=timeout)

3087 

3088 def generate_upload_policy(self, conditions, expiration=None, client=None): 

3089 """Create a signed upload policy for uploading objects. 

3090 

3091 This method generates and signs a policy document. You can use 

3092 [`policy documents`](https://cloud.google.com/storage/docs/xml-api/post-object-forms) 

3093 to allow visitors to a website to upload files to 

3094 Google Cloud Storage without giving them direct write access. 

3095 See a [code sample](https://cloud.google.com/storage/docs/xml-api/post-object-forms#python). 

3096 

3097 :type expiration: datetime 

3098 :param expiration: (Optional) Expiration in UTC. If not specified, the 

3099 policy will expire in 1 hour. 

3100 

3101 :type conditions: list 

3102 :param conditions: A list of conditions as described in the 

3103 `policy documents` documentation. 

3104 

3105 :type client: :class:`~google.cloud.storage.client.Client` 

3106 :param client: (Optional) The client to use. If not passed, falls back 

3107 to the ``client`` stored on the current bucket. 

3108 

3109 :rtype: dict 

3110 :returns: A dictionary of (form field name, form field value) of form 

3111 fields that should be added to your HTML upload form in order 

3112 to attach the signature. 

3113 """ 

3114 client = self._require_client(client) 

3115 credentials = client._credentials 

3116 _signing.ensure_signed_credentials(credentials) 

3117 

3118 if expiration is None: 

3119 expiration = _NOW() + datetime.timedelta(hours=1) 

3120 

3121 conditions = conditions + [{"bucket": self.name}] 

3122 

3123 policy_document = { 

3124 "expiration": _datetime_to_rfc3339(expiration), 

3125 "conditions": conditions, 

3126 } 

3127 

3128 encoded_policy_document = base64.b64encode( 

3129 json.dumps(policy_document).encode("utf-8") 

3130 ) 

3131 signature = base64.b64encode(credentials.sign_bytes(encoded_policy_document)) 

3132 

3133 fields = { 

3134 "bucket": self.name, 

3135 "GoogleAccessId": credentials.signer_email, 

3136 "policy": encoded_policy_document.decode("utf-8"), 

3137 "signature": signature.decode("utf-8"), 

3138 } 

3139 

3140 return fields 

3141 

3142 def lock_retention_policy( 

3143 self, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY 

3144 ): 

3145 """Lock the bucket's retention policy. 

3146 

3147 :type client: :class:`~google.cloud.storage.client.Client` or 

3148 ``NoneType`` 

3149 :param client: (Optional) The client to use. If not passed, falls back 

3150 to the ``client`` stored on the blob's bucket. 

3151 

3152 :type timeout: float or tuple 

3153 :param timeout: 

3154 (Optional) The amount of time, in seconds, to wait 

3155 for the server response. See: :ref:`configuring_timeouts` 

3156 

3157 :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy 

3158 :param retry: 

3159 (Optional) How to retry the RPC. See: :ref:`configuring_retries` 

3160 

3161 :raises ValueError: 

3162 if the bucket has no metageneration (i.e., new or never reloaded); 

3163 if the bucket has no retention policy assigned; 

3164 if the bucket's retention policy is already locked. 

3165 """ 

3166 if "metageneration" not in self._properties: 

3167 raise ValueError("Bucket has no retention policy assigned: try 'reload'?") 

3168 

3169 policy = self._properties.get("retentionPolicy") 

3170 

3171 if policy is None: 

3172 raise ValueError("Bucket has no retention policy assigned: try 'reload'?") 

3173 

3174 if policy.get("isLocked"): 

3175 raise ValueError("Bucket's retention policy is already locked.") 

3176 

3177 client = self._require_client(client) 

3178 

3179 query_params = {"ifMetagenerationMatch": self.metageneration} 

3180 

3181 if self.user_project is not None: 

3182 query_params["userProject"] = self.user_project 

3183 

3184 path = f"/b/{self.name}/lockRetentionPolicy" 

3185 api_response = client._post_resource( 

3186 path, 

3187 None, 

3188 query_params=query_params, 

3189 timeout=timeout, 

3190 retry=retry, 

3191 _target_object=self, 

3192 ) 

3193 self._set_properties(api_response) 

3194 

3195 def generate_signed_url( 

3196 self, 

3197 expiration=None, 

3198 api_access_endpoint=_API_ACCESS_ENDPOINT, 

3199 method="GET", 

3200 headers=None, 

3201 query_parameters=None, 

3202 client=None, 

3203 credentials=None, 

3204 version=None, 

3205 virtual_hosted_style=False, 

3206 bucket_bound_hostname=None, 

3207 scheme="http", 

3208 ): 

3209 """Generates a signed URL for this bucket. 

3210 

3211 .. note:: 

3212 

3213 If you are on Google Compute Engine, you can't generate a signed 

3214 URL using GCE service account. If you'd like to be able to generate 

3215 a signed URL from GCE, you can use a standard service account from a 

3216 JSON file rather than a GCE service account. 

3217 

3218 If you have a bucket that you want to allow access to for a set 

3219 amount of time, you can use this method to generate a URL that 

3220 is only valid within a certain time period. 

3221 

3222 If ``bucket_bound_hostname`` is set as an argument of :attr:`api_access_endpoint`, 

3223 ``https`` works only if using a ``CDN``. 

3224 

3225 :type expiration: Union[Integer, datetime.datetime, datetime.timedelta] 

3226 :param expiration: Point in time when the signed URL should expire. If 

3227 a ``datetime`` instance is passed without an explicit 

3228 ``tzinfo`` set, it will be assumed to be ``UTC``. 

3229 

3230 :type api_access_endpoint: str 

3231 :param api_access_endpoint: (Optional) URI base. 

3232 

3233 :type method: str 

3234 :param method: The HTTP verb that will be used when requesting the URL. 

3235 

3236 :type headers: dict 

3237 :param headers: 

3238 (Optional) Additional HTTP headers to be included as part of the 

3239 signed URLs. See: 

3240 https://cloud.google.com/storage/docs/xml-api/reference-headers 

3241 Requests using the signed URL *must* pass the specified header 

3242 (name and value) with each request for the URL. 

3243 

3244 :type query_parameters: dict 

3245 :param query_parameters: 

3246 (Optional) Additional query parameters to be included as part of the 

3247 signed URLs. See: 

3248 https://cloud.google.com/storage/docs/xml-api/reference-headers#query 

3249 

3250 :type client: :class:`~google.cloud.storage.client.Client` or 

3251 ``NoneType`` 

3252 :param client: (Optional) The client to use. If not passed, falls back 

3253 to the ``client`` stored on the blob's bucket. 

3254 

3255 

3256 :type credentials: :class:`google.auth.credentials.Credentials` or 

3257 :class:`NoneType` 

3258 :param credentials: The authorization credentials to attach to requests. 

3259 These credentials identify this application to the service. 

3260 If none are specified, the client will attempt to ascertain 

3261 the credentials from the environment. 

3262 

3263 :type version: str 

3264 :param version: (Optional) The version of signed credential to create. 

3265 Must be one of 'v2' | 'v4'. 

3266 

3267 :type virtual_hosted_style: bool 

3268 :param virtual_hosted_style: 

3269 (Optional) If true, then construct the URL relative the bucket's 

3270 virtual hostname, e.g., '<bucket-name>.storage.googleapis.com'. 

3271 

3272 :type bucket_bound_hostname: str 

3273 :param bucket_bound_hostname: 

3274 (Optional) If pass, then construct the URL relative to the bucket-bound hostname. 

3275 Value cane be a bare or with scheme, e.g., 'example.com' or 'http://example.com'. 

3276 See: https://cloud.google.com/storage/docs/request-endpoints#cname 

3277 

3278 :type scheme: str 

3279 :param scheme: 

3280 (Optional) If ``bucket_bound_hostname`` is passed as a bare hostname, use 

3281 this value as the scheme. ``https`` will work only when using a CDN. 

3282 Defaults to ``"http"``. 

3283 

3284 :raises: :exc:`ValueError` when version is invalid. 

3285 :raises: :exc:`TypeError` when expiration is not a valid type. 

3286 :raises: :exc:`AttributeError` if credentials is not an instance 

3287 of :class:`google.auth.credentials.Signing`. 

3288 

3289 :rtype: str 

3290 :returns: A signed URL you can use to access the resource 

3291 until expiration. 

3292 """ 

3293 if version is None: 

3294 version = "v2" 

3295 elif version not in ("v2", "v4"): 

3296 raise ValueError("'version' must be either 'v2' or 'v4'") 

3297 

3298 # If you are on Google Compute Engine, you can't generate a signed URL 

3299 # using GCE service account. 

3300 # See https://github.com/googleapis/google-auth-library-python/issues/50 

3301 if virtual_hosted_style: 

3302 api_access_endpoint = f"https://{self.name}.storage.googleapis.com" 

3303 elif bucket_bound_hostname: 

3304 api_access_endpoint = _bucket_bound_hostname_url( 

3305 bucket_bound_hostname, scheme 

3306 ) 

3307 else: 

3308 resource = f"/{self.name}" 

3309 

3310 if virtual_hosted_style or bucket_bound_hostname: 

3311 resource = "/" 

3312 

3313 if credentials is None: 

3314 client = self._require_client(client) 

3315 credentials = client._credentials 

3316 

3317 if version == "v2": 

3318 helper = generate_signed_url_v2 

3319 else: 

3320 helper = generate_signed_url_v4 

3321 

3322 return helper( 

3323 credentials, 

3324 resource=resource, 

3325 expiration=expiration, 

3326 api_access_endpoint=api_access_endpoint, 

3327 method=method.upper(), 

3328 headers=headers, 

3329 query_parameters=query_parameters, 

3330 ) 

3331 

3332 

3333def _raise_if_len_differs(expected_len, **generation_match_args): 

3334 """ 

3335 Raise an error if any generation match argument 

3336 is set and its len differs from the given value. 

3337 

3338 :type expected_len: int 

3339 :param expected_len: Expected argument length in case it's set. 

3340 

3341 :type generation_match_args: dict 

3342 :param generation_match_args: Lists, which length must be checked. 

3343 

3344 :raises: :exc:`ValueError` if any argument set, but has an unexpected length. 

3345 """ 

3346 for name, value in generation_match_args.items(): 

3347 if value is not None and len(value) != expected_len: 

3348 raise ValueError(f"'{name}' length must be the same as 'blobs' length")