# -*- coding: utf-8 -*-
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union

from google.cloud.firestore_v1 import gapic_version as package_version

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore
import google.protobuf

from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import document as gf_document
from google.cloud.firestore_v1.types import firestore
from google.cloud.location import locations_pb2  # type: ignore
from google.longrunning import operations_pb2  # type: ignore
from google.protobuf import empty_pb2  # type: ignore

DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)

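# Record the protobuf runtime version on the client info, but only when the
# installed google-api-core's ``ClientInfo`` exposes that attribute.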
if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"):  # pragma: NO COVER
    DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__


class FirestoreTransport(abc.ABC):
    """Abstract transport class for Firestore."""

    AUTH_SCOPES = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/datastore",
    )

    DEFAULT_HOST: str = "firestore.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to (default: 'firestore.googleapis.com').
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): Deprecated. A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials. This argument will be
                removed in the next major version of this library.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether a self-signed JWT should
                be used for service account credentials.
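
        Example:
            A minimal sketch of constructing a concrete transport; the
            ``FirestoreGrpcTransport`` subclass and the explicit arguments are
            shown purely for illustration::

                from google.cloud.firestore_v1.services.firestore.transports import (
                    FirestoreGrpcTransport,
                )

                # With no ``credentials`` argument, google.auth.default()
                # resolves credentials from the environment.
                transport = FirestoreGrpcTransport(
                    host="firestore.googleapis.com",
                    scopes=FirestoreGrpcTransport.AUTH_SCOPES,
                )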
92 """
93
94 scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
95
96 # Save the scopes.
97 self._scopes = scopes
98 if not hasattr(self, "_ignore_credentials"):
99 self._ignore_credentials: bool = False
100
101 # If no credentials are provided, then determine the appropriate
102 # defaults.
103 if credentials and credentials_file:
104 raise core_exceptions.DuplicateCredentialArgs(
105 "'credentials_file' and 'credentials' are mutually exclusive"
106 )
107
108 if credentials_file is not None:
109 credentials, _ = google.auth.load_credentials_from_file(
110 credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
111 )
112 elif credentials is None and not self._ignore_credentials:
113 credentials, _ = google.auth.default(
114 **scopes_kwargs, quota_project_id=quota_project_id
115 )
116 # Don't apply audience if the credentials file passed from user.
117 if hasattr(credentials, "with_gdch_audience"):
118 credentials = credentials.with_gdch_audience(
119 api_audience if api_audience else host
120 )
121
        # If the credentials are service account credentials, then always try to use a self-signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

    @property
    def host(self):
        return self._host

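    # Concrete transports (gRPC, gRPC-asyncio, REST) call this from their
    # ``__init__`` to wrap each RPC callable with the default retry and
    # timeout settings below.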
    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.get_document: gapic_v1.method.wrap_method(
                self.get_document,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ResourceExhausted,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.list_documents: gapic_v1.method.wrap_method(
                self.list_documents,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ResourceExhausted,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.update_document: gapic_v1.method.wrap_method(
                self.update_document,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.ResourceExhausted,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.delete_document: gapic_v1.method.wrap_method(
                self.delete_document,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ResourceExhausted,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.batch_get_documents: gapic_v1.method.wrap_method(
                self.batch_get_documents,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ResourceExhausted,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=300.0,
                ),
                default_timeout=300.0,
                client_info=client_info,
            ),
            self.begin_transaction: gapic_v1.method.wrap_method(
                self.begin_transaction,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ResourceExhausted,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.commit: gapic_v1.method.wrap_method(
                self.commit,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.ResourceExhausted,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.rollback: gapic_v1.method.wrap_method(
                self.rollback,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ResourceExhausted,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.run_query: gapic_v1.method.wrap_method(
                self.run_query,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ResourceExhausted,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=300.0,
                ),
                default_timeout=300.0,
                client_info=client_info,
            ),
            self.execute_pipeline: gapic_v1.method.wrap_method(
                self.execute_pipeline,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ResourceExhausted,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=300.0,
                ),
                default_timeout=300.0,
                client_info=client_info,
            ),
            self.run_aggregation_query: gapic_v1.method.wrap_method(
                self.run_aggregation_query,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ResourceExhausted,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=300.0,
                ),
                default_timeout=300.0,
                client_info=client_info,
            ),
            self.partition_query: gapic_v1.method.wrap_method(
                self.partition_query,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ResourceExhausted,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=300.0,
                ),
                default_timeout=300.0,
                client_info=client_info,
            ),
            self.write: gapic_v1.method.wrap_method(
                self.write,
                default_timeout=86400.0,
                client_info=client_info,
            ),
            self.listen: gapic_v1.method.wrap_method(
                self.listen,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ResourceExhausted,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=86400.0,
                ),
                default_timeout=86400.0,
                client_info=client_info,
            ),
            self.list_collection_ids: gapic_v1.method.wrap_method(
                self.list_collection_ids,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ResourceExhausted,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.batch_write: gapic_v1.method.wrap_method(
                self.batch_write,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.Aborted,
                        core_exceptions.ResourceExhausted,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.create_document: gapic_v1.method.wrap_method(
                self.create_document,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.ResourceExhausted,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.cancel_operation: gapic_v1.method.wrap_method(
                self.cancel_operation,
                default_timeout=None,
                client_info=client_info,
            ),
            self.delete_operation: gapic_v1.method.wrap_method(
                self.delete_operation,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_operation: gapic_v1.method.wrap_method(
                self.get_operation,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_operations: gapic_v1.method.wrap_method(
                self.list_operations,
                default_timeout=None,
                client_info=client_info,
            ),
        }
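        # Note: the retry objects above use exponential backoff (0.1s initial
        # delay, 1.3x multiplier, capped at 60s between attempts) and only
        # retry the transient error codes named in each predicate; the
        # bidirectional streaming RPCs ``write`` and ``listen`` use a 24-hour
        # (86400s) timeout instead of the 60s/300s unary defaults.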

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

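    # Each property below returns the concrete transport's callable for one
    # RPC; synchronous transports return the response directly, while the
    # asyncio transport returns an awaitable (hence the ``Union`` return types).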
    @property
    def get_document(
        self,
    ) -> Callable[
        [firestore.GetDocumentRequest],
        Union[document.Document, Awaitable[document.Document]],
    ]:
        raise NotImplementedError()

    @property
    def list_documents(
        self,
    ) -> Callable[
        [firestore.ListDocumentsRequest],
        Union[
            firestore.ListDocumentsResponse, Awaitable[firestore.ListDocumentsResponse]
        ],
    ]:
        raise NotImplementedError()

    @property
    def update_document(
        self,
    ) -> Callable[
        [firestore.UpdateDocumentRequest],
        Union[gf_document.Document, Awaitable[gf_document.Document]],
    ]:
        raise NotImplementedError()

    @property
    def delete_document(
        self,
    ) -> Callable[
        [firestore.DeleteDocumentRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()

    @property
    def batch_get_documents(
        self,
    ) -> Callable[
        [firestore.BatchGetDocumentsRequest],
        Union[
            firestore.BatchGetDocumentsResponse,
            Awaitable[firestore.BatchGetDocumentsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def begin_transaction(
        self,
    ) -> Callable[
        [firestore.BeginTransactionRequest],
        Union[
            firestore.BeginTransactionResponse,
            Awaitable[firestore.BeginTransactionResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def commit(
        self,
    ) -> Callable[
        [firestore.CommitRequest],
        Union[firestore.CommitResponse, Awaitable[firestore.CommitResponse]],
    ]:
        raise NotImplementedError()

    @property
    def rollback(
        self,
    ) -> Callable[
        [firestore.RollbackRequest], Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]]
    ]:
        raise NotImplementedError()

    @property
    def run_query(
        self,
    ) -> Callable[
        [firestore.RunQueryRequest],
        Union[firestore.RunQueryResponse, Awaitable[firestore.RunQueryResponse]],
    ]:
        raise NotImplementedError()

    @property
    def execute_pipeline(
        self,
    ) -> Callable[
        [firestore.ExecutePipelineRequest],
        Union[
            firestore.ExecutePipelineResponse,
            Awaitable[firestore.ExecutePipelineResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def run_aggregation_query(
        self,
    ) -> Callable[
        [firestore.RunAggregationQueryRequest],
        Union[
            firestore.RunAggregationQueryResponse,
            Awaitable[firestore.RunAggregationQueryResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def partition_query(
        self,
    ) -> Callable[
        [firestore.PartitionQueryRequest],
        Union[
            firestore.PartitionQueryResponse,
            Awaitable[firestore.PartitionQueryResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def write(
        self,
    ) -> Callable[
        [firestore.WriteRequest],
        Union[firestore.WriteResponse, Awaitable[firestore.WriteResponse]],
    ]:
        raise NotImplementedError()

    @property
    def listen(
        self,
    ) -> Callable[
        [firestore.ListenRequest],
        Union[firestore.ListenResponse, Awaitable[firestore.ListenResponse]],
    ]:
        raise NotImplementedError()

    @property
    def list_collection_ids(
        self,
    ) -> Callable[
        [firestore.ListCollectionIdsRequest],
        Union[
            firestore.ListCollectionIdsResponse,
            Awaitable[firestore.ListCollectionIdsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def batch_write(
        self,
    ) -> Callable[
        [firestore.BatchWriteRequest],
        Union[firestore.BatchWriteResponse, Awaitable[firestore.BatchWriteResponse]],
    ]:
        raise NotImplementedError()

    @property
    def create_document(
        self,
    ) -> Callable[
        [firestore.CreateDocumentRequest],
        Union[document.Document, Awaitable[document.Document]],
    ]:
        raise NotImplementedError()

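    # The remaining properties cover the long-running operations mixin
    # (``google.longrunning.Operations``) that the Firestore transports also
    # expose.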
    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[
            operations_pb2.ListOperationsResponse,
            Awaitable[operations_pb2.ListOperationsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def cancel_operation(
        self,
    ) -> Callable[[operations_pb2.CancelOperationRequest], None,]:
        raise NotImplementedError()

    @property
    def delete_operation(
        self,
    ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]:
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        raise NotImplementedError()


__all__ = ("FirestoreTransport",)