Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/google/cloud/bigquery_storage_v1/types/storage.py: 100%
106 statements
coverage.py v7.2.2, created at 2023-03-26 06:10 +0000
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations

from typing import MutableMapping, MutableSequence

import proto  # type: ignore

from google.cloud.bigquery_storage_v1.types import arrow
from google.cloud.bigquery_storage_v1.types import avro
from google.cloud.bigquery_storage_v1.types import protobuf
from google.cloud.bigquery_storage_v1.types import stream
from google.cloud.bigquery_storage_v1.types import table
from google.protobuf import timestamp_pb2  # type: ignore
from google.protobuf import wrappers_pb2  # type: ignore
from google.rpc import status_pb2  # type: ignore


__protobuf__ = proto.module(
    package="google.cloud.bigquery.storage.v1",
    manifest={
        "CreateReadSessionRequest",
        "ReadRowsRequest",
        "ThrottleState",
        "StreamStats",
        "ReadRowsResponse",
        "SplitReadStreamRequest",
        "SplitReadStreamResponse",
        "CreateWriteStreamRequest",
        "AppendRowsRequest",
        "AppendRowsResponse",
        "GetWriteStreamRequest",
        "BatchCommitWriteStreamsRequest",
        "BatchCommitWriteStreamsResponse",
        "FinalizeWriteStreamRequest",
        "FinalizeWriteStreamResponse",
        "FlushRowsRequest",
        "FlushRowsResponse",
        "StorageError",
        "RowError",
    },
)


class CreateReadSessionRequest(proto.Message):
    r"""Request message for ``CreateReadSession``.

    Attributes:
        parent (str):
            Required. The request project that owns the session, in the
            form of ``projects/{project_id}``.
        read_session (google.cloud.bigquery_storage_v1.types.ReadSession):
            Required. Session to be created.
        max_stream_count (int):
            Max initial number of streams. If unset or zero, the server
            will provide a number of streams that produces reasonable
            throughput. Must be non-negative. The number of streams may
            be lower than the requested number, depending on the amount
            of parallelism that is reasonable for the table. There is a
            default system max limit of 1,000.

            This must be greater than or equal to
            preferred_min_stream_count. Typically, clients should either
            leave this unset to let the system determine an upper bound
            OR set this to the maximum number of "units of work" the
            client can gracefully handle.
        preferred_min_stream_count (int):
            The minimum preferred stream count. This
            parameter can be used to inform the service that
            there is a desired lower bound on the number of
            streams. This is typically a target parallelism
            of the client (e.g. a Spark cluster with
            N-workers would set this to a low multiple of N
            to ensure good cluster utilization).

            The system will make a best effort to provide at
            least this number of streams, but in some cases
            might provide less.
    """

    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    read_session: stream.ReadSession = proto.Field(
        proto.MESSAGE,
        number=2,
        message=stream.ReadSession,
    )
    max_stream_count: int = proto.Field(
        proto.INT32,
        number=3,
    )
    preferred_min_stream_count: int = proto.Field(
        proto.INT32,
        number=4,
    )
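

# Illustrative usage sketch (not part of the generated types): how a caller
# might build a CreateReadSessionRequest. The project and table names are
# hypothetical; ReadSession and DataFormat come from the sibling ``stream``
# module imported above.
def _example_create_read_session_request() -> "CreateReadSessionRequest":
    session = stream.ReadSession(
        table="projects/my-project/datasets/my_dataset/tables/my_table",
        data_format=stream.DataFormat.ARROW,
    )
    return CreateReadSessionRequest(
        parent="projects/my-project",
        read_session=session,
        max_stream_count=0,  # zero lets the server choose a reasonable count
    )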


class ReadRowsRequest(proto.Message):
    r"""Request message for ``ReadRows``.

    Attributes:
        read_stream (str):
            Required. Stream to read rows from.
        offset (int):
            The offset requested must be less than the
            last row read from Read. Requesting a larger
            offset is undefined. If not specified, start
            reading from offset zero.
    """

    read_stream: str = proto.Field(
        proto.STRING,
        number=1,
    )
    offset: int = proto.Field(
        proto.INT64,
        number=2,
    )
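

# Illustrative sketch: resuming a read at a known offset. The stream name
# follows the documented pattern, but the identifiers are placeholders.
def _example_read_rows_request() -> "ReadRowsRequest":
    return ReadRowsRequest(
        read_stream=(
            "projects/my-project/locations/us"
            "/sessions/session-id/streams/stream-id"
        ),
        offset=1000,  # continue after the first 1,000 rows already read
    )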


class ThrottleState(proto.Message):
    r"""Information about whether the current connection is being
    throttled.

    Attributes:
        throttle_percent (int):
            How much this connection is being throttled.
            Zero means no throttling, 100 means fully
            throttled.
    """

    throttle_percent: int = proto.Field(
        proto.INT32,
        number=1,
    )


class StreamStats(proto.Message):
    r"""Estimated stream statistics for a given read Stream.

    Attributes:
        progress (google.cloud.bigquery_storage_v1.types.StreamStats.Progress):
            Represents the progress of the current
            stream.
    """

    class Progress(proto.Message):
        r"""
        Attributes:
            at_response_start (float):
                The fraction of rows assigned to the stream that have been
                processed by the server so far, not including the rows in
                the current response message.

                This value, along with ``at_response_end``, can be used to
                interpolate the progress made as the rows in the message are
                being processed using the following formula:
                ``at_response_start + (at_response_end - at_response_start) * rows_processed_from_response / rows_in_response``.

                Note that if a filter is provided, the ``at_response_end``
                value of the previous response may not necessarily be equal
                to the ``at_response_start`` value of the current response.
            at_response_end (float):
                Similar to ``at_response_start``, except that this value
                includes the rows in the current response.
        """

        at_response_start: float = proto.Field(
            proto.DOUBLE,
            number=1,
        )
        at_response_end: float = proto.Field(
            proto.DOUBLE,
            number=2,
        )

    progress: Progress = proto.Field(
        proto.MESSAGE,
        number=2,
        message=Progress,
    )
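

# Worked sketch of the interpolation formula quoted in the Progress docstring:
# while consuming a response, overall stream progress can be estimated as
# at_response_start + (at_response_end - at_response_start)
#     * rows_processed_from_response / rows_in_response.
def _interpolate_progress(
    progress: "StreamStats.Progress",
    rows_processed_from_response: int,
    rows_in_response: int,
) -> float:
    span = progress.at_response_end - progress.at_response_start
    return (
        progress.at_response_start
        + span * rows_processed_from_response / rows_in_response
    )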


class ReadRowsResponse(proto.Message):
    r"""Response from calling ``ReadRows`` may include row data, progress
    and throttling information.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        avro_rows (google.cloud.bigquery_storage_v1.types.AvroRows):
            Serialized row data in AVRO format.

            This field is a member of `oneof`_ ``rows``.
        arrow_record_batch (google.cloud.bigquery_storage_v1.types.ArrowRecordBatch):
            Serialized row data in Arrow RecordBatch
            format.

            This field is a member of `oneof`_ ``rows``.
        row_count (int):
            Number of serialized rows in the rows block.
        stats (google.cloud.bigquery_storage_v1.types.StreamStats):
            Statistics for the stream.
        throttle_state (google.cloud.bigquery_storage_v1.types.ThrottleState):
            Throttling state. If unset, the latest
            response still describes the current throttling
            status.
        avro_schema (google.cloud.bigquery_storage_v1.types.AvroSchema):
            Output only. Avro schema.

            This field is a member of `oneof`_ ``schema``.
        arrow_schema (google.cloud.bigquery_storage_v1.types.ArrowSchema):
            Output only. Arrow schema.

            This field is a member of `oneof`_ ``schema``.
    """

    avro_rows: avro.AvroRows = proto.Field(
        proto.MESSAGE,
        number=3,
        oneof="rows",
        message=avro.AvroRows,
    )
    arrow_record_batch: arrow.ArrowRecordBatch = proto.Field(
        proto.MESSAGE,
        number=4,
        oneof="rows",
        message=arrow.ArrowRecordBatch,
    )
    row_count: int = proto.Field(
        proto.INT64,
        number=6,
    )
    stats: "StreamStats" = proto.Field(
        proto.MESSAGE,
        number=2,
        message="StreamStats",
    )
    throttle_state: "ThrottleState" = proto.Field(
        proto.MESSAGE,
        number=5,
        message="ThrottleState",
    )
    avro_schema: avro.AvroSchema = proto.Field(
        proto.MESSAGE,
        number=7,
        oneof="schema",
        message=avro.AvroSchema,
    )
    arrow_schema: arrow.ArrowSchema = proto.Field(
        proto.MESSAGE,
        number=8,
        oneof="schema",
        message=arrow.ArrowSchema,
    )
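

# Illustrative sketch: dispatching on the ``rows`` oneof of a ReadRowsResponse.
# WhichOneof() is standard protobuf API, reached through the proto-plus ``_pb``
# escape hatch; the payload field names come from the arrow/avro types above.
def _example_extract_row_data(response: "ReadRowsResponse") -> bytes:
    kind = response._pb.WhichOneof("rows")
    if kind == "arrow_record_batch":
        return response.arrow_record_batch.serialized_record_batch
    if kind == "avro_rows":
        return response.avro_rows.serialized_binary_rows
    return b""  # no row payload in this response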


class SplitReadStreamRequest(proto.Message):
    r"""Request message for ``SplitReadStream``.

    Attributes:
        name (str):
            Required. Name of the stream to split.
        fraction (float):
            A value in the range (0.0, 1.0) that
            specifies the fractional point at which the
            original stream should be split. The actual
            split point is evaluated on pre-filtered rows,
            so if a filter is provided, then there is no
            guarantee that the division of the rows between
            the new child streams will be proportional to
            this fractional value. Additionally, because the
            server-side unit for assigning data is
            collections of rows, this fraction will always
            map to a data storage boundary on the server
            side.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    fraction: float = proto.Field(
        proto.DOUBLE,
        number=2,
    )


class SplitReadStreamResponse(proto.Message):
    r"""Response message for ``SplitReadStream``.

    Attributes:
        primary_stream (google.cloud.bigquery_storage_v1.types.ReadStream):
            Primary stream, which contains the beginning portion of
            \|original_stream|. An empty value indicates that the
            original stream can no longer be split.
        remainder_stream (google.cloud.bigquery_storage_v1.types.ReadStream):
            Remainder stream, which contains the tail of
            \|original_stream|. An empty value indicates that the
            original stream can no longer be split.
    """

    primary_stream: stream.ReadStream = proto.Field(
        proto.MESSAGE,
        number=1,
        message=stream.ReadStream,
    )
    remainder_stream: stream.ReadStream = proto.Field(
        proto.MESSAGE,
        number=2,
        message=stream.ReadStream,
    )


class CreateWriteStreamRequest(proto.Message):
    r"""Request message for ``CreateWriteStream``.

    Attributes:
        parent (str):
            Required. Reference to the table to which the stream
            belongs, in the format of
            ``projects/{project}/datasets/{dataset}/tables/{table}``.
        write_stream (google.cloud.bigquery_storage_v1.types.WriteStream):
            Required. Stream to be created.
    """

    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    write_stream: stream.WriteStream = proto.Field(
        proto.MESSAGE,
        number=2,
        message=stream.WriteStream,
    )
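

# Illustrative sketch: creating a PENDING write stream, whose rows stay
# buffered until BatchCommitWriteStreams. WriteStream.Type comes from the
# sibling ``stream`` module; note that proto-plus exposes the ``type`` field
# as ``type_``. The table path is hypothetical.
def _example_create_write_stream_request() -> "CreateWriteStreamRequest":
    return CreateWriteStreamRequest(
        parent="projects/my-project/datasets/my_dataset/tables/my_table",
        write_stream=stream.WriteStream(
            type_=stream.WriteStream.Type.PENDING,
        ),
    )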


class AppendRowsRequest(proto.Message):
    r"""Request message for ``AppendRows``.

    Because AppendRows is a bidirectional streaming RPC, certain parts
    of the AppendRowsRequest need only be specified for the first
    request sent each time the gRPC network connection is
    opened/reopened.

    The size of a single AppendRowsRequest must be less than 10 MB.
    Requests larger than this return an error, typically
    ``INVALID_ARGUMENT``.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        write_stream (str):
            Required. The write_stream identifies the target of the
            append operation, and only needs to be specified as part of
            the first request on the gRPC connection. If provided for
            subsequent requests, it must match the value of the first
            request.

            For explicitly created write streams, the format is:

            - ``projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}``

            For the special default stream, the format is:

            - ``projects/{project}/datasets/{dataset}/tables/{table}/streams/_default``.
        offset (google.protobuf.wrappers_pb2.Int64Value):
            If present, the write is only performed if the next append
            offset is the same as the provided value. If not present,
            the write is performed at the current end of stream.
            Specifying a value for this field is not allowed when
            calling AppendRows for the '_default' stream.
        proto_rows (google.cloud.bigquery_storage_v1.types.AppendRowsRequest.ProtoData):
            Rows in proto format.

            This field is a member of `oneof`_ ``rows``.
        trace_id (str):
            ID set by the client to annotate its
            identity. Only the setting on the initial
            request is respected.
        missing_value_interpretations (MutableMapping[str, google.cloud.bigquery_storage_v1.types.AppendRowsRequest.MissingValueInterpretation]):
            A map indicating how to interpret missing values for some
            fields. Missing values are fields present in the user schema
            but missing in rows. The key is the field name. The value is
            the interpretation of missing values for the field.

            For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE}
            means all missing values in field foo are interpreted as
            NULL, and all missing values in field bar are interpreted as
            the default value of field bar in the table schema.

            If a field is not in this map and has missing values, the
            missing values in this field are interpreted as NULL.

            This field only applies to the current request; it won't
            affect other requests on the connection.

            Currently, the field name can only be a top-level column
            name; it can't be a struct field path like 'foo.bar'.
    """

    class MissingValueInterpretation(proto.Enum):
        r"""An enum to indicate how to interpret missing values. Missing
        values are fields present in the user schema but missing in
        rows. A missing value can represent a NULL or a column default
        value defined in the BigQuery table schema.

        Values:
            MISSING_VALUE_INTERPRETATION_UNSPECIFIED (0):
                Invalid missing value interpretation.
                Requests with this value will be rejected.
            NULL_VALUE (1):
                Missing value is interpreted as NULL.
            DEFAULT_VALUE (2):
                Missing value is interpreted as the column
                default value if declared in the table schema,
                NULL otherwise.
        """
        MISSING_VALUE_INTERPRETATION_UNSPECIFIED = 0
        NULL_VALUE = 1
        DEFAULT_VALUE = 2

    class ProtoData(proto.Message):
        r"""ProtoData contains the data rows and schema when constructing
        append requests.

        Attributes:
            writer_schema (google.cloud.bigquery_storage_v1.types.ProtoSchema):
                Proto schema used to serialize the data.
                This value only needs to be provided as part of
                the first request on a gRPC network connection,
                and will be ignored for subsequent requests on
                the connection.
            rows (google.cloud.bigquery_storage_v1.types.ProtoRows):
                Serialized row data in protobuf message
                format. Currently, the backend expects the
                serialized rows to adhere to proto2 semantics
                when appending rows, particularly with respect
                to how default values are encoded.
        """

        writer_schema: protobuf.ProtoSchema = proto.Field(
            proto.MESSAGE,
            number=1,
            message=protobuf.ProtoSchema,
        )
        rows: protobuf.ProtoRows = proto.Field(
            proto.MESSAGE,
            number=2,
            message=protobuf.ProtoRows,
        )

    write_stream: str = proto.Field(
        proto.STRING,
        number=1,
    )
    offset: wrappers_pb2.Int64Value = proto.Field(
        proto.MESSAGE,
        number=2,
        message=wrappers_pb2.Int64Value,
    )
    proto_rows: ProtoData = proto.Field(
        proto.MESSAGE,
        number=4,
        oneof="rows",
        message=ProtoData,
    )
    trace_id: str = proto.Field(
        proto.STRING,
        number=6,
    )
    missing_value_interpretations: MutableMapping[
        str, MissingValueInterpretation
    ] = proto.MapField(
        proto.STRING,
        proto.ENUM,
        number=7,
        enum=MissingValueInterpretation,
    )
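

# Illustrative sketch: the first AppendRowsRequest on a connection carries the
# write_stream and the writer schema; the missing-value map mirrors the
# docstring example ({'foo': NULL_VALUE, 'bar': DEFAULT_VALUE}). The
# ``proto_schema`` and ``serialized_rows`` parameters are placeholders for a
# real descriptor and encoded rows.
def _example_append_rows_request(
    proto_schema: protobuf.ProtoSchema,
    serialized_rows: MutableSequence[bytes],
) -> "AppendRowsRequest":
    return AppendRowsRequest(
        write_stream=(
            "projects/my-project/datasets/my_dataset"
            "/tables/my_table/streams/_default"
        ),
        proto_rows=AppendRowsRequest.ProtoData(
            writer_schema=proto_schema,
            rows=protobuf.ProtoRows(serialized_rows=serialized_rows),
        ),
        missing_value_interpretations={
            "foo": AppendRowsRequest.MissingValueInterpretation.NULL_VALUE,
            "bar": AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE,
        },
    )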


class AppendRowsResponse(proto.Message):
    r"""Response message for ``AppendRows``.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        append_result (google.cloud.bigquery_storage_v1.types.AppendRowsResponse.AppendResult):
            Result if the append is successful.

            This field is a member of `oneof`_ ``response``.
        error (google.rpc.status_pb2.Status):
            Error returned when problems were encountered. If present,
            it indicates rows were not accepted into the system. Users
            can retry or continue with other append requests within the
            same connection.

            Additional information about error signalling:

            ALREADY_EXISTS: Happens when an append specified an offset,
            and the backend already has received data at this offset.
            Typically encountered in retry scenarios, and can be
            ignored.

            OUT_OF_RANGE: Returned when the specified offset in the
            stream is beyond the current end of the stream.

            INVALID_ARGUMENT: Indicates a malformed request or data.

            ABORTED: Request processing is aborted because of prior
            failures. The request can be retried if the previous
            failure is addressed.

            INTERNAL: Indicates server-side error(s) that can be
            retried.

            This field is a member of `oneof`_ ``response``.
        updated_schema (google.cloud.bigquery_storage_v1.types.TableSchema):
            If the backend detects a schema update, it is
            passed to the user so that the user can use it
            to input new types of messages. It will be empty
            when no schema updates have occurred.
        row_errors (MutableSequence[google.cloud.bigquery_storage_v1.types.RowError]):
            If a request failed due to corrupted rows, no
            rows in the batch will be appended. The API will
            return row-level error info, so that the caller
            can remove the bad rows and retry the request.
        write_stream (str):
            The target of the append operation. Matches the write_stream
            in the corresponding request.
    """

    class AppendResult(proto.Message):
        r"""AppendResult is returned for successful append requests.

        Attributes:
            offset (google.protobuf.wrappers_pb2.Int64Value):
                The row offset at which the last append
                occurred. The offset will not be set if
                appending using default streams.
        """

        offset: wrappers_pb2.Int64Value = proto.Field(
            proto.MESSAGE,
            number=1,
            message=wrappers_pb2.Int64Value,
        )

    append_result: AppendResult = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof="response",
        message=AppendResult,
    )
    error: status_pb2.Status = proto.Field(
        proto.MESSAGE,
        number=2,
        oneof="response",
        message=status_pb2.Status,
    )
    updated_schema: table.TableSchema = proto.Field(
        proto.MESSAGE,
        number=3,
        message=table.TableSchema,
    )
    row_errors: MutableSequence["RowError"] = proto.RepeatedField(
        proto.MESSAGE,
        number=4,
        message="RowError",
    )
    write_stream: str = proto.Field(
        proto.STRING,
        number=5,
    )
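

# Illustrative sketch: inspecting the ``response`` oneof of an
# AppendRowsResponse. On success the offset (if any) is in append_result; on
# failure, ``error`` plus the per-row ``row_errors`` describe what to fix or
# retry. WhichOneof()/HasField() are standard protobuf API reached through
# the proto-plus ``_pb`` attribute.
def _example_handle_append_rows_response(response: "AppendRowsResponse") -> None:
    if response._pb.WhichOneof("response") == "append_result":
        result = response.append_result
        if result._pb.HasField("offset"):
            print("appended at offset", result.offset.value)
    else:
        print("append failed:", response.error.code, response.error.message)
        for row_error in response.row_errors:
            print("row", row_error.index, row_error.code, row_error.message)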


class GetWriteStreamRequest(proto.Message):
    r"""Request message for ``GetWriteStream``.

    Attributes:
        name (str):
            Required. Name of the stream to get, in the form of
            ``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``.
        view (google.cloud.bigquery_storage_v1.types.WriteStreamView):
            Indicates whether to get the full or partial
            view of the WriteStream. If not set, a basic
            view is returned.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    view: stream.WriteStreamView = proto.Field(
        proto.ENUM,
        number=3,
        enum=stream.WriteStreamView,
    )
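

# Illustrative sketch: requesting the FULL view of a write stream instead of
# the default basic view. WriteStreamView comes from the sibling ``stream``
# module; the stream name is hypothetical.
def _example_get_write_stream_request() -> "GetWriteStreamRequest":
    return GetWriteStreamRequest(
        name=(
            "projects/my-project/datasets/my_dataset"
            "/tables/my_table/streams/my_stream"
        ),
        view=stream.WriteStreamView.FULL,
    )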


class BatchCommitWriteStreamsRequest(proto.Message):
    r"""Request message for ``BatchCommitWriteStreams``.

    Attributes:
        parent (str):
            Required. Parent table that all the streams should belong
            to, in the form of
            ``projects/{project}/datasets/{dataset}/tables/{table}``.
        write_streams (MutableSequence[str]):
            Required. The group of streams that will be
            committed atomically.
    """

    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    write_streams: MutableSequence[str] = proto.RepeatedField(
        proto.STRING,
        number=2,
    )


class BatchCommitWriteStreamsResponse(proto.Message):
    r"""Response message for ``BatchCommitWriteStreams``.

    Attributes:
        commit_time (google.protobuf.timestamp_pb2.Timestamp):
            The time at which streams were committed, at microsecond
            granularity. This field will only exist when there are no
            stream errors. **Note:** if this field is not set, it means
            the commit was not successful.
        stream_errors (MutableSequence[google.cloud.bigquery_storage_v1.types.StorageError]):
            Stream-level errors if the commit failed.
            Only streams with errors will be in the list.
            If empty, there were no errors and all streams
            were committed successfully. If non-empty,
            certain streams have errors and zero streams
            were committed due to the atomicity guarantee.
    """

    commit_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=1,
        message=timestamp_pb2.Timestamp,
    )
    stream_errors: MutableSequence["StorageError"] = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message="StorageError",
    )
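

# Illustrative sketch: per the docstrings above, a batch commit succeeded only
# when commit_time is set and stream_errors is empty. HasField() is standard
# protobuf API reached through the proto-plus ``_pb`` attribute.
def _example_commit_succeeded(response: "BatchCommitWriteStreamsResponse") -> bool:
    return response._pb.HasField("commit_time") and not response.stream_errors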


class FinalizeWriteStreamRequest(proto.Message):
    r"""Request message for invoking ``FinalizeWriteStream``.

    Attributes:
        name (str):
            Required. Name of the stream to finalize, in the form of
            ``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )


class FinalizeWriteStreamResponse(proto.Message):
    r"""Response message for ``FinalizeWriteStream``.

    Attributes:
        row_count (int):
            Number of rows in the finalized stream.
    """

    row_count: int = proto.Field(
        proto.INT64,
        number=1,
    )


class FlushRowsRequest(proto.Message):
    r"""Request message for ``FlushRows``.

    Attributes:
        write_stream (str):
            Required. The stream that is the target of
            the flush operation.
        offset (google.protobuf.wrappers_pb2.Int64Value):
            Ending offset of the flush operation. Rows
            before this offset (including this offset) will
            be flushed.
    """

    write_stream: str = proto.Field(
        proto.STRING,
        number=1,
    )
    offset: wrappers_pb2.Int64Value = proto.Field(
        proto.MESSAGE,
        number=2,
        message=wrappers_pb2.Int64Value,
    )
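

# Illustrative sketch: flushing all buffered rows up to and including offset
# 99. The Int64Value wrapper distinguishes "offset 0" from "offset unset";
# the stream name is hypothetical.
def _example_flush_rows_request() -> "FlushRowsRequest":
    return FlushRowsRequest(
        write_stream=(
            "projects/my-project/datasets/my_dataset"
            "/tables/my_table/streams/my_stream"
        ),
        offset=wrappers_pb2.Int64Value(value=99),
    )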


class FlushRowsResponse(proto.Message):
    r"""Response message for ``FlushRows``.

    Attributes:
        offset (int):
            The rows before this offset (including this
            offset) are flushed.
    """

    offset: int = proto.Field(
        proto.INT64,
        number=1,
    )


class StorageError(proto.Message):
    r"""Structured custom BigQuery Storage error message. The error
    can be attached as error details in the returned RPC Status. In
    particular, the use of error codes allows more structured error
    handling and reduces the need to evaluate unstructured error
    text strings.

    Attributes:
        code (google.cloud.bigquery_storage_v1.types.StorageError.StorageErrorCode):
            BigQuery Storage specific error code.
        entity (str):
            Name of the failed entity.
        error_message (str):
            Message that describes the error.
    """

    class StorageErrorCode(proto.Enum):
        r"""Error code for ``StorageError``.

        Values:
            STORAGE_ERROR_CODE_UNSPECIFIED (0):
                Default error.
            TABLE_NOT_FOUND (1):
                Table is not found in the system.
            STREAM_ALREADY_COMMITTED (2):
                Stream is already committed.
            STREAM_NOT_FOUND (3):
                Stream is not found.
            INVALID_STREAM_TYPE (4):
                Invalid stream type.
                For example, you try to commit a stream that is
                not pending.
            INVALID_STREAM_STATE (5):
                Invalid stream state.
                For example, you try to commit a stream that is
                not finalized or has been garbage collected.
            STREAM_FINALIZED (6):
                Stream is finalized.
            SCHEMA_MISMATCH_EXTRA_FIELDS (7):
                There is a schema mismatch, caused by the
                user schema having an extra field that is not
                present in the BigQuery schema.
            OFFSET_ALREADY_EXISTS (8):
                Offset already exists.
            OFFSET_OUT_OF_RANGE (9):
                Offset out of range.
        """
        STORAGE_ERROR_CODE_UNSPECIFIED = 0
        TABLE_NOT_FOUND = 1
        STREAM_ALREADY_COMMITTED = 2
        STREAM_NOT_FOUND = 3
        INVALID_STREAM_TYPE = 4
        INVALID_STREAM_STATE = 5
        STREAM_FINALIZED = 6
        SCHEMA_MISMATCH_EXTRA_FIELDS = 7
        OFFSET_ALREADY_EXISTS = 8
        OFFSET_OUT_OF_RANGE = 9

    code: StorageErrorCode = proto.Field(
        proto.ENUM,
        number=1,
        enum=StorageErrorCode,
    )
    entity: str = proto.Field(
        proto.STRING,
        number=2,
    )
    error_message: str = proto.Field(
        proto.STRING,
        number=3,
    )


class RowError(proto.Message):
    r"""The message that presents row-level error info in a request.

    Attributes:
        index (int):
            Index of the malformed row in the request.
        code (google.cloud.bigquery_storage_v1.types.RowError.RowErrorCode):
            Structured error reason for a row error.
        message (str):
            Description of the issue encountered when
            processing the row.
    """

    class RowErrorCode(proto.Enum):
        r"""Error code for ``RowError``.

        Values:
            ROW_ERROR_CODE_UNSPECIFIED (0):
                Default error.
            FIELDS_ERROR (1):
                One or more fields in the row have errors.
        """
        ROW_ERROR_CODE_UNSPECIFIED = 0
        FIELDS_ERROR = 1

    index: int = proto.Field(
        proto.INT64,
        number=1,
    )
    code: RowErrorCode = proto.Field(
        proto.ENUM,
        number=2,
        enum=RowErrorCode,
    )
    message: str = proto.Field(
        proto.STRING,
        number=3,
    )


__all__ = tuple(sorted(__protobuf__.manifest))