# Copyright 2021 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Helpers for batch requests to the Google Cloud Firestore API."""
from typing import Optional

from google.api_core import gapic_v1
from google.api_core import retry as retries

from google.cloud.firestore_v1 import _helpers
from google.cloud.firestore_v1.base_batch import BaseBatch
from google.cloud.firestore_v1.types.firestore import BatchWriteResponse


class BulkWriteBatch(BaseBatch):
    """Accumulate write operations to be sent in a batch. Use this over
    `WriteBatch` for higher volumes (e.g., via `BulkWriter`) and when the
    order of operations within a given batch is unimportant.

    Because the order in which individual write operations are applied to the
    database is not guaranteed, `batch_write` RPCs can never contain multiple
    operations to the same document. If calling code detects a second write
    operation to a known document reference, it should first cut off the
    previous batch and send it, then create a new batch starting with the
    latest write operation. In practice, the [Async]BulkWriter classes handle
    this; a sketch of the pattern appears at the end of this module.

    This has the same set of methods for write operations that
    :class:`~google.cloud.firestore_v1.document.DocumentReference` does,
    e.g. :meth:`~google.cloud.firestore_v1.document.DocumentReference.create`.

    Args:
        client (:class:`~google.cloud.firestore_v1.client.Client`):
            The client that created this batch.
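
    Example:
        An illustrative sketch (not run as a doctest); assumes ``client`` is
        an already-configured
        :class:`~google.cloud.firestore_v1.client.Client`::

            batch = BulkWriteBatch(client=client)
            ref = client.document("users", "alice")
            batch.create(ref, {"name": "Alice"})
            batch.commit()

            # A second write to the same document belongs in a fresh batch.
            batch = BulkWriteBatch(client=client)
            batch.update(ref, {"age": 30})
            batch.commit()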
43 """

    def __init__(self, client) -> None:
        super(BulkWriteBatch, self).__init__(client=client)

    def commit(
        self,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
    ) -> BatchWriteResponse:
        """Writes the changes accumulated in this batch.

        Write operations are not guaranteed to be applied in order and must not
        contain multiple writes to any given document. Preferred over
        :meth:`~google.cloud.firestore_v1.batch.WriteBatch.commit` for
        performance reasons if these conditions are acceptable.

        Args:
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried. Defaults to a system-specified policy.
            timeout (float): The timeout for this request. Defaults to a
                system-specified value.

        Returns:
            :class:`google.cloud.proto.firestore.v1.write.BatchWriteResponse`:
                Container holding the write results corresponding to the changes
                committed, returned in the same order as the changes were applied to
                this batch. An individual write result contains an ``update_time``
                field.
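
        Example:
            An illustrative sketch; assumes writes have already been queued on
            ``batch`` and ``timeout`` is in seconds::

                response = batch.commit(timeout=30.0)
                for write_result in response.write_results:
                    print(write_result.update_time)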
69 """
        request, kwargs = self._prep_commit(retry, timeout)

        _api = self._client._firestore_api
        save_response: BatchWriteResponse = _api.batch_write(
            request=request,
            metadata=self._client._rpc_metadata,
            **kwargs,
        )

        # Clear the queued writes and record the per-write results so callers
        # can inspect them on this batch instance.
        self._write_pbs = []
        self.write_results = list(save_response.write_results)

        return save_response

    def _prep_commit(self, retry: retries.Retry, timeout: Optional[float]):
        """Assemble the ``BatchWrite`` request and the retry/timeout kwargs."""
        request = {
            "database": self._client._database_string,
            "writes": self._write_pbs,
            "labels": None,
        }
        kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
        return request, kwargs
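

# Illustrative sketch (not part of the library): the batch-splitting pattern
# described in the ``BulkWriteBatch`` docstring above. Because a single
# ``batch_write`` RPC may not contain two writes to the same document, calling
# code commits the current batch and starts a new one when a document repeats.
# The ``send_writes`` name and its arguments are assumptions for illustration;
# the real implementation lives in the [Async]BulkWriter classes.
def send_writes(client, docs_and_data):
    """Commit ``(reference, data)`` pairs, splitting batches on repeats."""
    batch = BulkWriteBatch(client=client)
    seen_paths = set()
    responses = []
    for reference, data in docs_and_data:
        if reference.path in seen_paths:
            # Second write to a known document: send the accumulated batch
            # first, then start a fresh one for the latest write.
            responses.append(batch.commit())
            batch = BulkWriteBatch(client=client)
            seen_paths.clear()
        batch.set(reference, data)
        seen_paths.add(reference.path)
    if seen_paths:
        responses.append(batch.commit())
    return responses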