1# -*- coding: utf-8 -*-
2# Copyright 2025 Google LLC
3#
4# Licensed under the Apache License, Version 2.0 (the "License");
5# you may not use this file except in compliance with the License.
6# You may obtain a copy of the License at
7#
8# http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS,
12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15#
16import abc
17from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
18
19from google.cloud.firestore_v1 import gapic_version as package_version
20
21import google.auth # type: ignore
22import google.api_core
23from google.api_core import exceptions as core_exceptions
24from google.api_core import gapic_v1
25from google.api_core import retry as retries
26from google.auth import credentials as ga_credentials # type: ignore
27from google.oauth2 import service_account # type: ignore
28import google.protobuf
29
30from google.cloud.firestore_v1.types import document
31from google.cloud.firestore_v1.types import document as gf_document
32from google.cloud.firestore_v1.types import firestore
33from google.cloud.location import locations_pb2 # type: ignore
34from google.longrunning import operations_pb2 # type: ignore
35import google.protobuf.empty_pb2 as empty_pb2 # type: ignore
36
# Default client metadata (user-agent info) attached to every API request,
# stamped with this package's version.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)

# Newer google-api-core releases also report the protobuf runtime version;
# set it only when the attribute exists so older api-core versions keep working.
if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"):  # pragma: NO COVER
    DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
43
44
class FirestoreTransport(abc.ABC):
    """Abstract transport class for Firestore.

    Concrete subclasses (e.g. gRPC or REST transports) implement the RPC
    properties declared below. This base class is responsible for resolving
    credentials and for precomputing retry/timeout-wrapped method objects
    via :meth:`_prep_wrapped_messages`.
    """

    # OAuth scopes requested when no explicit scopes are supplied.
    AUTH_SCOPES = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/datastore",
    )

    DEFAULT_HOST: str = "firestore.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to (default: 'firestore.googleapis.com').
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): Deprecated. A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials. This argument will be
                removed in the next major version of this library.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            api_audience (Optional[str]): Audience applied to credentials that
                support a GDCH audience; defaults to ``host`` when unset.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """

        # Save the scopes.
        self._scopes = scopes
        if not hasattr(self, "_ignore_credentials"):
            self._ignore_credentials: bool = False

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                scopes=scopes,
                quota_project_id=quota_project_id,
                default_scopes=self.AUTH_SCOPES,
            )
        elif credentials is None and not self._ignore_credentials:
            credentials, _ = google.auth.default(
                scopes=scopes,
                quota_project_id=quota_project_id,
                default_scopes=self.AUTH_SCOPES,
            )
            # Don't apply audience if the credentials file passed from user.
            if hasattr(credentials, "with_gdch_audience"):
                credentials = credentials.with_gdch_audience(
                    api_audience if api_audience else host
                )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

    @property
    def host(self):
        """The hostname (including port) this transport connects to."""
        return self._host

    def _prep_wrapped_messages(self, client_info):
        """Precompute retry/timeout-wrapped methods into ``self._wrapped_methods``.

        Every RPC shares the same exponential-backoff parameters
        (initial=0.1s, maximum=60s, multiplier=1.3); the RPCs differ only in
        which error codes they retry and in their deadline/timeout values.
        """
        # Retried by most methods: transient server-side failures.
        transient = retries.if_exception_type(
            core_exceptions.DeadlineExceeded,
            core_exceptions.InternalServerError,
            core_exceptions.ResourceExhausted,
            core_exceptions.ServiceUnavailable,
        )
        # update_document / commit / create_document retry only this
        # narrower set (no DeadlineExceeded / InternalServerError).
        narrow = retries.if_exception_type(
            core_exceptions.ResourceExhausted,
            core_exceptions.ServiceUnavailable,
        )
        # batch_write additionally retries Aborted.
        batch_write_pred = retries.if_exception_type(
            core_exceptions.Aborted,
            core_exceptions.ResourceExhausted,
            core_exceptions.ServiceUnavailable,
        )

        def _retry(predicate, deadline):
            # Shared backoff parameters for all Firestore retries.
            return retries.Retry(
                initial=0.1,
                maximum=60.0,
                multiplier=1.3,
                predicate=predicate,
                deadline=deadline,
            )

        def _wrap(method, retry=None, timeout=None):
            # wrap_method treats default_retry=None / default_timeout=None
            # the same as omitting them.
            return gapic_v1.method.wrap_method(
                method,
                default_retry=retry,
                default_timeout=timeout,
                client_info=client_info,
            )

        self._wrapped_methods = {
            self.get_document: _wrap(
                self.get_document, _retry(transient, 60.0), 60.0
            ),
            self.list_documents: _wrap(
                self.list_documents, _retry(transient, 60.0), 60.0
            ),
            self.update_document: _wrap(
                self.update_document, _retry(narrow, 60.0), 60.0
            ),
            self.delete_document: _wrap(
                self.delete_document, _retry(transient, 60.0), 60.0
            ),
            self.batch_get_documents: _wrap(
                self.batch_get_documents, _retry(transient, 300.0), 300.0
            ),
            self.begin_transaction: _wrap(
                self.begin_transaction, _retry(transient, 60.0), 60.0
            ),
            self.commit: _wrap(
                self.commit, _retry(narrow, 60.0), 60.0
            ),
            self.rollback: _wrap(
                self.rollback, _retry(transient, 60.0), 60.0
            ),
            self.run_query: _wrap(
                self.run_query, _retry(transient, 300.0), 300.0
            ),
            self.execute_pipeline: _wrap(
                self.execute_pipeline, _retry(transient, 300.0), 300.0
            ),
            self.run_aggregation_query: _wrap(
                self.run_aggregation_query, _retry(transient, 300.0), 300.0
            ),
            self.partition_query: _wrap(
                self.partition_query, _retry(transient, 300.0), 300.0
            ),
            # Streaming write: no retry, 24-hour timeout.
            self.write: _wrap(self.write, None, 86400.0),
            self.listen: _wrap(
                self.listen, _retry(transient, 86400.0), 86400.0
            ),
            self.list_collection_ids: _wrap(
                self.list_collection_ids, _retry(transient, 60.0), 60.0
            ),
            self.batch_write: _wrap(
                self.batch_write, _retry(batch_write_pred, 60.0), 60.0
            ),
            self.create_document: _wrap(
                self.create_document, _retry(narrow, 60.0), 60.0
            ),
            # Operations-mixin methods: no retry, no default timeout.
            self.cancel_operation: _wrap(self.cancel_operation),
            self.delete_operation: _wrap(self.delete_operation),
            self.get_operation: _wrap(self.get_operation),
            self.list_operations: _wrap(self.list_operations),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def get_document(
        self,
    ) -> Callable[
        [firestore.GetDocumentRequest],
        Union[document.Document, Awaitable[document.Document]],
    ]:
        """Return a callable for the GetDocument RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def list_documents(
        self,
    ) -> Callable[
        [firestore.ListDocumentsRequest],
        Union[
            firestore.ListDocumentsResponse, Awaitable[firestore.ListDocumentsResponse]
        ],
    ]:
        """Return a callable for the ListDocuments RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def update_document(
        self,
    ) -> Callable[
        [firestore.UpdateDocumentRequest],
        Union[gf_document.Document, Awaitable[gf_document.Document]],
    ]:
        """Return a callable for the UpdateDocument RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def delete_document(
        self,
    ) -> Callable[
        [firestore.DeleteDocumentRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        """Return a callable for the DeleteDocument RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def batch_get_documents(
        self,
    ) -> Callable[
        [firestore.BatchGetDocumentsRequest],
        Union[
            firestore.BatchGetDocumentsResponse,
            Awaitable[firestore.BatchGetDocumentsResponse],
        ],
    ]:
        """Return a callable for the BatchGetDocuments RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def begin_transaction(
        self,
    ) -> Callable[
        [firestore.BeginTransactionRequest],
        Union[
            firestore.BeginTransactionResponse,
            Awaitable[firestore.BeginTransactionResponse],
        ],
    ]:
        """Return a callable for the BeginTransaction RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def commit(
        self,
    ) -> Callable[
        [firestore.CommitRequest],
        Union[firestore.CommitResponse, Awaitable[firestore.CommitResponse]],
    ]:
        """Return a callable for the Commit RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def rollback(
        self,
    ) -> Callable[
        [firestore.RollbackRequest], Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]]
    ]:
        """Return a callable for the Rollback RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def run_query(
        self,
    ) -> Callable[
        [firestore.RunQueryRequest],
        Union[firestore.RunQueryResponse, Awaitable[firestore.RunQueryResponse]],
    ]:
        """Return a callable for the RunQuery RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def execute_pipeline(
        self,
    ) -> Callable[
        [firestore.ExecutePipelineRequest],
        Union[
            firestore.ExecutePipelineResponse,
            Awaitable[firestore.ExecutePipelineResponse],
        ],
    ]:
        """Return a callable for the ExecutePipeline RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def run_aggregation_query(
        self,
    ) -> Callable[
        [firestore.RunAggregationQueryRequest],
        Union[
            firestore.RunAggregationQueryResponse,
            Awaitable[firestore.RunAggregationQueryResponse],
        ],
    ]:
        """Return a callable for the RunAggregationQuery RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def partition_query(
        self,
    ) -> Callable[
        [firestore.PartitionQueryRequest],
        Union[
            firestore.PartitionQueryResponse,
            Awaitable[firestore.PartitionQueryResponse],
        ],
    ]:
        """Return a callable for the PartitionQuery RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def write(
        self,
    ) -> Callable[
        [firestore.WriteRequest],
        Union[firestore.WriteResponse, Awaitable[firestore.WriteResponse]],
    ]:
        """Return a callable for the Write streaming RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def listen(
        self,
    ) -> Callable[
        [firestore.ListenRequest],
        Union[firestore.ListenResponse, Awaitable[firestore.ListenResponse]],
    ]:
        """Return a callable for the Listen streaming RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def list_collection_ids(
        self,
    ) -> Callable[
        [firestore.ListCollectionIdsRequest],
        Union[
            firestore.ListCollectionIdsResponse,
            Awaitable[firestore.ListCollectionIdsResponse],
        ],
    ]:
        """Return a callable for the ListCollectionIds RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def batch_write(
        self,
    ) -> Callable[
        [firestore.BatchWriteRequest],
        Union[firestore.BatchWriteResponse, Awaitable[firestore.BatchWriteResponse]],
    ]:
        """Return a callable for the BatchWrite RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def create_document(
        self,
    ) -> Callable[
        [firestore.CreateDocumentRequest],
        Union[document.Document, Awaitable[document.Document]],
    ]:
        """Return a callable for the CreateDocument RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[
            operations_pb2.ListOperationsResponse,
            Awaitable[operations_pb2.ListOperationsResponse],
        ],
    ]:
        """Return a callable for the ListOperations RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        """Return a callable for the GetOperation RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def cancel_operation(
        self,
    ) -> Callable[[operations_pb2.CancelOperationRequest], None,]:
        """Return a callable for the CancelOperation RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def delete_operation(
        self,
    ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]:
        """Return a callable for the DeleteOperation RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        """Short string identifying the concrete transport (e.g. 'grpc')."""
        raise NotImplementedError()
657
658
659__all__ = ("FirestoreTransport",)