# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations

from typing import MutableMapping, MutableSequence

from google.protobuf import timestamp_pb2  # type: ignore
from google.protobuf import wrappers_pb2  # type: ignore
from google.rpc import status_pb2  # type: ignore
import proto  # type: ignore

from google.cloud.bigquery_storage_v1.types import arrow, avro, protobuf, stream, table

__protobuf__ = proto.module(
    package="google.cloud.bigquery.storage.v1",
    manifest={
        "CreateReadSessionRequest",
        "ReadRowsRequest",
        "ThrottleState",
        "StreamStats",
        "ReadRowsResponse",
        "SplitReadStreamRequest",
        "SplitReadStreamResponse",
        "CreateWriteStreamRequest",
        "AppendRowsRequest",
        "AppendRowsResponse",
        "GetWriteStreamRequest",
        "BatchCommitWriteStreamsRequest",
        "BatchCommitWriteStreamsResponse",
        "FinalizeWriteStreamRequest",
        "FinalizeWriteStreamResponse",
        "FlushRowsRequest",
        "FlushRowsResponse",
        "StorageError",
        "RowError",
    },
)


class CreateReadSessionRequest(proto.Message):
    r"""Request message for ``CreateReadSession``.

    Attributes:
        parent (str):
            Required. The request project that owns the session, in the
            form of ``projects/{project_id}``.
        read_session (google.cloud.bigquery_storage_v1.types.ReadSession):
            Required. Session to be created.
        max_stream_count (int):
            Max initial number of streams. If unset or zero, the server
            will provide a value of streams so as to produce reasonable
            throughput. Must be non-negative. The number of streams may
            be lower than the requested number, depending on the amount
            of parallelism that is reasonable for the table. There is a
            default system max limit of 1,000.

            This must be greater than or equal to
            preferred_min_stream_count. Typically, clients should either
            leave this unset to let the system determine an upper bound
            or set this to a size for the maximum "units of work" it can
            gracefully handle.
        preferred_min_stream_count (int):
            The minimum preferred stream count. This
            parameter can be used to inform the service that
            there is a desired lower bound on the number of
            streams. This is typically a target parallelism
            of the client (e.g. a Spark cluster with
            N-workers would set this to a low multiple of N
            to ensure good cluster utilization).

            The system will make a best effort to provide at
            least this number of streams, but in some cases
            might provide less.
    """

    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    read_session: stream.ReadSession = proto.Field(
        proto.MESSAGE,
        number=2,
        message=stream.ReadSession,
    )
    max_stream_count: int = proto.Field(
        proto.INT32,
        number=3,
    )
    preferred_min_stream_count: int = proto.Field(
        proto.INT32,
        number=4,
    )

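# Illustrative sketch (not part of the generated message definitions): how a
# caller might build a ``CreateReadSessionRequest`` that caps the initial
# stream count while hinting at a preferred minimum. The project, dataset and
# table names below are placeholders.
#
#     from google.cloud.bigquery_storage_v1 import types
#
#     request = types.CreateReadSessionRequest(
#         parent="projects/my-project",
#         read_session=types.ReadSession(
#             table="projects/my-project/datasets/my_dataset/tables/my_table",
#             data_format=types.DataFormat.ARROW,
#         ),
#         max_stream_count=16,           # upper bound on "units of work"
#         preferred_min_stream_count=4,  # e.g. a low multiple of worker count
#     )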

class ReadRowsRequest(proto.Message):
    r"""Request message for ``ReadRows``.

    Attributes:
        read_stream (str):
            Required. Stream to read rows from.
        offset (int):
            The offset requested must be less than the
            last row read from Read. Requesting a larger
            offset is undefined. If not specified, start
            reading from offset zero.
    """

    read_stream: str = proto.Field(
        proto.STRING,
        number=1,
    )
    offset: int = proto.Field(
        proto.INT64,
        number=2,
    )


class ThrottleState(proto.Message):
    r"""Information on whether the current connection is being
    throttled.

    Attributes:
        throttle_percent (int):
            How much this connection is being throttled.
            Zero means no throttling, 100 means fully
            throttled.
    """

    throttle_percent: int = proto.Field(
        proto.INT32,
        number=1,
    )


class StreamStats(proto.Message):
    r"""Estimated stream statistics for a given read Stream.

    Attributes:
        progress (google.cloud.bigquery_storage_v1.types.StreamStats.Progress):
            Represents the progress of the current
            stream.
    """

    class Progress(proto.Message):
        r"""

        Attributes:
            at_response_start (float):
                The fraction of rows assigned to the stream that have been
                processed by the server so far, not including the rows in
                the current response message.

                This value, along with ``at_response_end``, can be used to
                interpolate the progress made as the rows in the message are
                being processed using the following formula:
                ``at_response_start + (at_response_end - at_response_start) * rows_processed_from_response / rows_in_response``.

                Note that if a filter is provided, the ``at_response_end``
                value of the previous response may not necessarily be equal
                to the ``at_response_start`` value of the current response.
            at_response_end (float):
                Similar to ``at_response_start``, except that this value
                includes the rows in the current response.
        """

        at_response_start: float = proto.Field(
            proto.DOUBLE,
            number=1,
        )
        at_response_end: float = proto.Field(
            proto.DOUBLE,
            number=2,
        )

    progress: Progress = proto.Field(
        proto.MESSAGE,
        number=2,
        message=Progress,
    )

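# Illustrative sketch (not part of the generated message definitions): applying
# the interpolation formula documented on ``StreamStats.Progress``.
# ``rows_processed`` and ``rows_in_response`` are hypothetical counters kept by
# the caller while iterating over the rows of a single response.
#
#     def interpolated_progress(progress, rows_processed, rows_in_response):
#         """Fraction of the stream processed after ``rows_processed`` rows."""
#         span = progress.at_response_end - progress.at_response_start
#         return progress.at_response_start + span * rows_processed / rows_in_response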

class ReadRowsResponse(proto.Message):
    r"""Response from calling ``ReadRows`` may include row data, progress
    and throttling information.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        avro_rows (google.cloud.bigquery_storage_v1.types.AvroRows):
            Serialized row data in AVRO format.

            This field is a member of `oneof`_ ``rows``.
        arrow_record_batch (google.cloud.bigquery_storage_v1.types.ArrowRecordBatch):
            Serialized row data in Arrow RecordBatch
            format.

            This field is a member of `oneof`_ ``rows``.
        row_count (int):
            Number of serialized rows in the rows block.
        stats (google.cloud.bigquery_storage_v1.types.StreamStats):
            Statistics for the stream.
        throttle_state (google.cloud.bigquery_storage_v1.types.ThrottleState):
            Throttling state. If unset, the latest
            response still describes the current throttling
            status.
        avro_schema (google.cloud.bigquery_storage_v1.types.AvroSchema):
            Output only. Avro schema.

            This field is a member of `oneof`_ ``schema``.
        arrow_schema (google.cloud.bigquery_storage_v1.types.ArrowSchema):
            Output only. Arrow schema.

            This field is a member of `oneof`_ ``schema``.
        uncompressed_byte_size (int):
            Optional. If the row data in this ReadRowsResponse is
            compressed, then uncompressed byte size is the original size
            of the uncompressed row data. If it is set to a value
            greater than 0, then decompress into a buffer of size
            uncompressed_byte_size using the compression codec that was
            requested during session creation time and which is
            specified in TableReadOptions.response_compression_codec in
            ReadSession. This value is not set if no
            response_compression_codec was requested, and it is -1 if
            the requested compression would not have reduced the size of
            this ReadRowsResponse's row data. This attempts to match
            Apache Arrow's behavior described in
            https://github.com/apache/arrow/issues/15102, where the
            uncompressed length may be set to -1 to indicate that the
            data that follows is not compressed, which can be useful for
            cases where compression does not yield appreciable savings.
            When uncompressed_byte_size is not greater than 0, the
            client should skip decompression.

            This field is a member of `oneof`_ ``_uncompressed_byte_size``.
    """

    avro_rows: avro.AvroRows = proto.Field(
        proto.MESSAGE,
        number=3,
        oneof="rows",
        message=avro.AvroRows,
    )
    arrow_record_batch: arrow.ArrowRecordBatch = proto.Field(
        proto.MESSAGE,
        number=4,
        oneof="rows",
        message=arrow.ArrowRecordBatch,
    )
    row_count: int = proto.Field(
        proto.INT64,
        number=6,
    )
    stats: "StreamStats" = proto.Field(
        proto.MESSAGE,
        number=2,
        message="StreamStats",
    )
    throttle_state: "ThrottleState" = proto.Field(
        proto.MESSAGE,
        number=5,
        message="ThrottleState",
    )
    avro_schema: avro.AvroSchema = proto.Field(
        proto.MESSAGE,
        number=7,
        oneof="schema",
        message=avro.AvroSchema,
    )
    arrow_schema: arrow.ArrowSchema = proto.Field(
        proto.MESSAGE,
        number=8,
        oneof="schema",
        message=arrow.ArrowSchema,
    )
    uncompressed_byte_size: int = proto.Field(
        proto.INT64,
        number=9,
        optional=True,
    )

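# Illustrative sketch (not part of the generated message definitions): the
# decompression decision described for ``uncompressed_byte_size``. The
# ``decompress`` helper is hypothetical and stands in for whatever codec
# implementation matches the TableReadOptions.response_compression_codec
# requested at session creation.
#
#     for response in reader:
#         payload = response.arrow_record_batch.serialized_record_batch
#         if response.uncompressed_byte_size > 0:
#             # Compressed: decompress into a buffer of exactly that many bytes.
#             payload = decompress(payload, response.uncompressed_byte_size)
#         # Unset or -1: the payload is not compressed, skip decompression.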

class SplitReadStreamRequest(proto.Message):
    r"""Request message for ``SplitReadStream``.

    Attributes:
        name (str):
            Required. Name of the stream to split.
        fraction (float):
            A value in the range (0.0, 1.0) that
            specifies the fractional point at which the
            original stream should be split. The actual
            split point is evaluated on pre-filtered rows,
            so if a filter is provided, then there is no
            guarantee that the division of the rows between
            the new child streams will be proportional to
            this fractional value. Additionally, because the
            server-side unit for assigning data is
            collections of rows, this fraction will always
            map to a data storage boundary on the server
            side.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    fraction: float = proto.Field(
        proto.DOUBLE,
        number=2,
    )


class SplitReadStreamResponse(proto.Message):
    r"""Response message for ``SplitReadStream``.

    Attributes:
        primary_stream (google.cloud.bigquery_storage_v1.types.ReadStream):
            Primary stream, which contains the beginning portion of
            \|original_stream|. An empty value indicates that the
            original stream can no longer be split.
        remainder_stream (google.cloud.bigquery_storage_v1.types.ReadStream):
            Remainder stream, which contains the tail of
            \|original_stream|. An empty value indicates that the
            original stream can no longer be split.
    """

    primary_stream: stream.ReadStream = proto.Field(
        proto.MESSAGE,
        number=1,
        message=stream.ReadStream,
    )
    remainder_stream: stream.ReadStream = proto.Field(
        proto.MESSAGE,
        number=2,
        message=stream.ReadStream,
    )


class CreateWriteStreamRequest(proto.Message):
    r"""Request message for ``CreateWriteStream``.

    Attributes:
        parent (str):
            Required. Reference to the table to which the stream
            belongs, in the format of
            ``projects/{project}/datasets/{dataset}/tables/{table}``.
        write_stream (google.cloud.bigquery_storage_v1.types.WriteStream):
            Required. Stream to be created.
    """

    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    write_stream: stream.WriteStream = proto.Field(
        proto.MESSAGE,
        number=2,
        message=stream.WriteStream,
    )


class AppendRowsRequest(proto.Message):
    r"""Request message for ``AppendRows``.

    Because AppendRows is a bidirectional streaming RPC, certain parts
    of the AppendRowsRequest need only be specified for the first
    request before switching table destinations. You can also switch
    table destinations within the same connection for the default
    stream.

    The size of a single AppendRowsRequest must be less than 10 MB.
    Requests larger than this return an error, typically
    ``INVALID_ARGUMENT``.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        write_stream (str):
            Required. The write_stream identifies the append operation.
            It must be provided in the following scenarios:

            -  In the first request to an AppendRows connection.

            -  In all subsequent requests to an AppendRows connection,
               if you use the same connection to write to multiple
               tables or change the input schema for default streams.

            For explicitly created write streams, the format is:

            -  ``projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}``

            For the special default stream, the format is:

            -  ``projects/{project}/datasets/{dataset}/tables/{table}/streams/_default``.

            An example of a possible sequence of requests with
            write_stream fields within a single connection:

            -  r1: {write_stream: stream_name_1}

            -  r2: {write_stream: /*omit*/}

            -  r3: {write_stream: /*omit*/}

            -  r4: {write_stream: stream_name_2}

            -  r5: {write_stream: stream_name_2}

            The destination changed in request_4, so the write_stream
            field must be populated in all subsequent requests in this
            stream.
        offset (google.protobuf.wrappers_pb2.Int64Value):
            If present, the write is only performed if the next append
            offset is the same as the provided value. If not present,
            the write is performed at the current end of stream.
            Specifying a value for this field is not allowed when
            calling AppendRows for the '_default' stream.
        proto_rows (google.cloud.bigquery_storage_v1.types.AppendRowsRequest.ProtoData):
            Rows in proto format.

            This field is a member of `oneof`_ ``rows``.
        trace_id (str):
            ID set by the client to annotate its identity.
            Only the setting in the initial request is
            respected.
        missing_value_interpretations (MutableMapping[str, google.cloud.bigquery_storage_v1.types.AppendRowsRequest.MissingValueInterpretation]):
            A map to indicate how to interpret missing values for some
            fields. Missing values are fields present in the user schema
            but missing in rows. The key is the field name. The value is
            the interpretation of missing values for the field.

            For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE}
            means all missing values in field foo are interpreted as
            NULL, and all missing values in field bar are interpreted as
            the default value of field bar in the table schema.

            If a field is not in this map and has missing values, the
            missing values in this field are interpreted as NULL.

            This field only applies to the current request; it won't
            affect other requests on the connection.

            Currently, a field name can only be a top-level column name;
            it can't be a struct field path like 'foo.bar'.
        default_missing_value_interpretation (google.cloud.bigquery_storage_v1.types.AppendRowsRequest.MissingValueInterpretation):
            Optional. Default missing value interpretation for all
            columns in the table. When a value is specified on an
            ``AppendRowsRequest``, it is applied to all requests on the
            connection from that point forward, until a subsequent
            ``AppendRowsRequest`` sets it to a different value.
            ``missing_value_interpretation`` can override
            ``default_missing_value_interpretation``. For example, if
            you want to write ``NULL`` instead of using default values
            for some columns, you can set
            ``default_missing_value_interpretation`` to
            ``DEFAULT_VALUE`` and at the same time, set
            ``missing_value_interpretations`` to ``NULL_VALUE`` on those
            columns.
    """

    class MissingValueInterpretation(proto.Enum):
        r"""An enum to indicate how to interpret missing values of fields
        that are present in user schema but missing in rows. A missing
        value can represent a NULL or a column default value defined in
        BigQuery table schema.

        Values:
            MISSING_VALUE_INTERPRETATION_UNSPECIFIED (0):
                Invalid missing value interpretation.
                Requests with this value will be rejected.
            NULL_VALUE (1):
                Missing value is interpreted as NULL.
            DEFAULT_VALUE (2):
                Missing value is interpreted as column
                default value if declared in the table schema,
                NULL otherwise.
        """
        MISSING_VALUE_INTERPRETATION_UNSPECIFIED = 0
        NULL_VALUE = 1
        DEFAULT_VALUE = 2

    class ProtoData(proto.Message):
        r"""ProtoData contains the data rows and schema when constructing
        append requests.

        Attributes:
            writer_schema (google.cloud.bigquery_storage_v1.types.ProtoSchema):
                The protocol buffer schema used to serialize the data.
                Provide this value whenever:

                -  You send the first request of an RPC connection.

                -  You change the input schema.

                -  You specify a new destination table.
            rows (google.cloud.bigquery_storage_v1.types.ProtoRows):
                Serialized row data in protobuf message
                format. Currently, the backend expects the
                serialized rows to adhere to proto2 semantics
                when appending rows, particularly with respect
                to how default values are encoded.
        """

        writer_schema: protobuf.ProtoSchema = proto.Field(
            proto.MESSAGE,
            number=1,
            message=protobuf.ProtoSchema,
        )
        rows: protobuf.ProtoRows = proto.Field(
            proto.MESSAGE,
            number=2,
            message=protobuf.ProtoRows,
        )

    write_stream: str = proto.Field(
        proto.STRING,
        number=1,
    )
    offset: wrappers_pb2.Int64Value = proto.Field(
        proto.MESSAGE,
        number=2,
        message=wrappers_pb2.Int64Value,
    )
    proto_rows: ProtoData = proto.Field(
        proto.MESSAGE,
        number=4,
        oneof="rows",
        message=ProtoData,
    )
    trace_id: str = proto.Field(
        proto.STRING,
        number=6,
    )
    missing_value_interpretations: MutableMapping[
        str, MissingValueInterpretation
    ] = proto.MapField(
        proto.STRING,
        proto.ENUM,
        number=7,
        enum=MissingValueInterpretation,
    )
    default_missing_value_interpretation: MissingValueInterpretation = proto.Field(
        proto.ENUM,
        number=8,
        enum=MissingValueInterpretation,
    )

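# Illustrative sketch (not part of the generated message definitions): building
# an ``AppendRowsRequest`` for the default stream. ``descriptor_proto`` (a
# ``DescriptorProto`` describing the row message) and ``row_messages`` (already
# populated protobuf messages) are hypothetical inputs supplied by the caller;
# the table path is a placeholder.
#
#     from google.cloud.bigquery_storage_v1 import types
#
#     request = types.AppendRowsRequest(
#         write_stream=(
#             "projects/my-project/datasets/my_dataset/tables/my_table"
#             "/streams/_default"
#         ),
#         proto_rows=types.AppendRowsRequest.ProtoData(
#             # Schema is only needed on the first request of the connection,
#             # when the schema changes, or when the destination table changes.
#             writer_schema=types.ProtoSchema(proto_descriptor=descriptor_proto),
#             rows=types.ProtoRows(
#                 serialized_rows=[m.SerializeToString() for m in row_messages],
#             ),
#         ),
#         missing_value_interpretations={
#             "bar": types.AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE,
#         },
#     )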

class AppendRowsResponse(proto.Message):
    r"""Response message for ``AppendRows``.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        append_result (google.cloud.bigquery_storage_v1.types.AppendRowsResponse.AppendResult):
            Result if the append is successful.

            This field is a member of `oneof`_ ``response``.
        error (google.rpc.status_pb2.Status):
            Error returned when problems were encountered. If present,
            it indicates rows were not accepted into the system. Users
            can retry or continue with other append requests within the
            same connection.

            Additional information about error signalling:

            ALREADY_EXISTS: Happens when an append specified an offset,
            and the backend already has received data at this offset.
            Typically encountered in retry scenarios, and can be
            ignored.

            OUT_OF_RANGE: Returned when the specified offset in the
            stream is beyond the current end of the stream.

            INVALID_ARGUMENT: Indicates a malformed request or data.

            ABORTED: Request processing is aborted because of prior
            failures. The request can be retried if previous failure is
            addressed.

            INTERNAL: Indicates server side error(s) that can be
            retried.

            This field is a member of `oneof`_ ``response``.
        updated_schema (google.cloud.bigquery_storage_v1.types.TableSchema):
            If the backend detects a schema update, it is
            passed to the user so that the user can use it
            to input new types of messages. It will be empty
            when no schema updates have occurred.
        row_errors (MutableSequence[google.cloud.bigquery_storage_v1.types.RowError]):
            If a request failed due to corrupted rows, no
            rows in the batch will be appended. The API will
            return row level error info, so that the caller
            can remove the bad rows and retry the request.
        write_stream (str):
            The target of the append operation. Matches the write_stream
            in the corresponding request.
    """

    class AppendResult(proto.Message):
        r"""AppendResult is returned for successful append requests.

        Attributes:
            offset (google.protobuf.wrappers_pb2.Int64Value):
                The row offset at which the last append
                occurred. The offset will not be set if
                appending using default streams.
        """

        offset: wrappers_pb2.Int64Value = proto.Field(
            proto.MESSAGE,
            number=1,
            message=wrappers_pb2.Int64Value,
        )

    append_result: AppendResult = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof="response",
        message=AppendResult,
    )
    error: status_pb2.Status = proto.Field(
        proto.MESSAGE,
        number=2,
        oneof="response",
        message=status_pb2.Status,
    )
    updated_schema: table.TableSchema = proto.Field(
        proto.MESSAGE,
        number=3,
        message=table.TableSchema,
    )
    row_errors: MutableSequence["RowError"] = proto.RepeatedField(
        proto.MESSAGE,
        number=4,
        message="RowError",
    )
    write_stream: str = proto.Field(
        proto.STRING,
        number=5,
    )

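# Illustrative sketch (not part of the generated message definitions): checking
# which member of the ``response`` oneof an ``AppendRowsResponse`` carries.
# ``responses`` stands in for the iterable returned by a streaming AppendRows
# call; the ``in`` presence check is the proto-plus idiom for oneof members.
#
#     for response in responses:
#         if "append_result" in response:
#             # Success; offset is unset when writing to the default stream.
#             print("appended at offset", response.append_result.offset)
#         elif "error" in response:
#             # e.g. ALREADY_EXISTS (safe to ignore on retry), OUT_OF_RANGE,
#             # INVALID_ARGUMENT, ABORTED (retryable), INTERNAL (retryable).
#             print("append failed:", response.error)
#         if response.row_errors:
#             # Row-level problems: drop the offending rows and retry the batch.
#             for row_error in response.row_errors:
#                 print(row_error.index, row_error.code, row_error.message)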

class GetWriteStreamRequest(proto.Message):
    r"""Request message for ``GetWriteStream``.

    Attributes:
        name (str):
            Required. Name of the stream to get, in the form of
            ``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``.
        view (google.cloud.bigquery_storage_v1.types.WriteStreamView):
            Indicates whether to get the full or partial
            view of the WriteStream. If not set, the basic
            view will be returned.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    view: stream.WriteStreamView = proto.Field(
        proto.ENUM,
        number=3,
        enum=stream.WriteStreamView,
    )


class BatchCommitWriteStreamsRequest(proto.Message):
    r"""Request message for ``BatchCommitWriteStreams``.

    Attributes:
        parent (str):
            Required. Parent table that all the streams should belong
            to, in the form of
            ``projects/{project}/datasets/{dataset}/tables/{table}``.
        write_streams (MutableSequence[str]):
            Required. The group of streams that will be
            committed atomically.
    """

    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    write_streams: MutableSequence[str] = proto.RepeatedField(
        proto.STRING,
        number=2,
    )


class BatchCommitWriteStreamsResponse(proto.Message):
    r"""Response message for ``BatchCommitWriteStreams``.

    Attributes:
        commit_time (google.protobuf.timestamp_pb2.Timestamp):
            The time at which streams were committed, with microsecond
            granularity. This field will only exist when there are no
            stream errors. **Note** if this field is not set, it means
            the commit was not successful.
        stream_errors (MutableSequence[google.cloud.bigquery_storage_v1.types.StorageError]):
            Stream level error if commit failed. Only
            streams with errors will be in the list.
            If empty, there is no error and all streams are
            committed successfully. If non-empty, certain
            streams have errors and zero streams are
            committed due to the atomicity guarantee.
    """

    commit_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=1,
        message=timestamp_pb2.Timestamp,
    )
    stream_errors: MutableSequence["StorageError"] = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message="StorageError",
    )

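# Illustrative sketch (not part of the generated message definitions):
# interpreting a ``BatchCommitWriteStreamsResponse``. ``write_client`` stands
# in for a ``BigQueryWriteClient`` instance and ``commit_request`` for a
# populated ``BatchCommitWriteStreamsRequest``.
#
#     response = write_client.batch_commit_write_streams(request=commit_request)
#     if response.stream_errors:
#         # Commit is atomic: any stream error means nothing was committed.
#         for err in response.stream_errors:
#             print(err.code, err.entity, err.error_message)
#     else:
#         # commit_time is only set when the commit succeeds.
#         print("committed at", response.commit_time)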

class FinalizeWriteStreamRequest(proto.Message):
    r"""Request message for invoking ``FinalizeWriteStream``.

    Attributes:
        name (str):
            Required. Name of the stream to finalize, in the form of
            ``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )


class FinalizeWriteStreamResponse(proto.Message):
    r"""Response message for ``FinalizeWriteStream``.

    Attributes:
        row_count (int):
            Number of rows in the finalized stream.
    """

    row_count: int = proto.Field(
        proto.INT64,
        number=1,
    )


class FlushRowsRequest(proto.Message):
    r"""Request message for ``FlushRows``.

    Attributes:
        write_stream (str):
            Required. The stream that is the target of
            the flush operation.
        offset (google.protobuf.wrappers_pb2.Int64Value):
            Ending offset of the flush operation. Rows
            before this offset (including this offset) will
            be flushed.
    """

    write_stream: str = proto.Field(
        proto.STRING,
        number=1,
    )
    offset: wrappers_pb2.Int64Value = proto.Field(
        proto.MESSAGE,
        number=2,
        message=wrappers_pb2.Int64Value,
    )


class FlushRowsResponse(proto.Message):
    r"""Response message for ``FlushRows``.

    Attributes:
        offset (int):
            The rows before this offset (including this
            offset) are flushed.
    """

    offset: int = proto.Field(
        proto.INT64,
        number=1,
    )


class StorageError(proto.Message):
    r"""Structured custom BigQuery Storage error message. The error
    can be attached as error details in the returned RPC Status. In
    particular, the use of error codes allows more structured error
    handling, and reduces the need to evaluate unstructured error
    text strings.

    Attributes:
        code (google.cloud.bigquery_storage_v1.types.StorageError.StorageErrorCode):
            BigQuery Storage specific error code.
        entity (str):
            Name of the failed entity.
        error_message (str):
            Message that describes the error.
    """

    class StorageErrorCode(proto.Enum):
        r"""Error code for ``StorageError``.

        Values:
            STORAGE_ERROR_CODE_UNSPECIFIED (0):
                Default error.
            TABLE_NOT_FOUND (1):
                Table is not found in the system.
            STREAM_ALREADY_COMMITTED (2):
                Stream is already committed.
            STREAM_NOT_FOUND (3):
                Stream is not found.
            INVALID_STREAM_TYPE (4):
                Invalid Stream type.
                For example, you try to commit a stream that is
                not pending.
            INVALID_STREAM_STATE (5):
                Invalid Stream state.
                For example, you try to commit a stream that is
                not finalized or has been garbage collected.
            STREAM_FINALIZED (6):
                Stream is finalized.
            SCHEMA_MISMATCH_EXTRA_FIELDS (7):
                There is a schema mismatch caused by the
                user schema having an extra field that is not
                present in the BigQuery table schema.
            OFFSET_ALREADY_EXISTS (8):
                Offset already exists.
            OFFSET_OUT_OF_RANGE (9):
                Offset out of range.
            CMEK_NOT_PROVIDED (10):
                Customer-managed encryption key (CMEK) not
                provided for CMEK-enabled data.
            INVALID_CMEK_PROVIDED (11):
                Customer-managed encryption key (CMEK) was
                incorrectly provided.
            CMEK_ENCRYPTION_ERROR (12):
                There is an encryption error while using
                customer-managed encryption key.
            KMS_SERVICE_ERROR (13):
                Key Management Service (KMS) service returned
                an error, which can be retried.
            KMS_PERMISSION_DENIED (14):
                Permission denied while using
                customer-managed encryption key.
        """
        STORAGE_ERROR_CODE_UNSPECIFIED = 0
        TABLE_NOT_FOUND = 1
        STREAM_ALREADY_COMMITTED = 2
        STREAM_NOT_FOUND = 3
        INVALID_STREAM_TYPE = 4
        INVALID_STREAM_STATE = 5
        STREAM_FINALIZED = 6
        SCHEMA_MISMATCH_EXTRA_FIELDS = 7
        OFFSET_ALREADY_EXISTS = 8
        OFFSET_OUT_OF_RANGE = 9
        CMEK_NOT_PROVIDED = 10
        INVALID_CMEK_PROVIDED = 11
        CMEK_ENCRYPTION_ERROR = 12
        KMS_SERVICE_ERROR = 13
        KMS_PERMISSION_DENIED = 14

    code: StorageErrorCode = proto.Field(
        proto.ENUM,
        number=1,
        enum=StorageErrorCode,
    )
    entity: str = proto.Field(
        proto.STRING,
        number=2,
    )
    error_message: str = proto.Field(
        proto.STRING,
        number=3,
    )


class RowError(proto.Message):
    r"""The message that presents row level error info in a request.

    Attributes:
        index (int):
            Index of the malformed row in the request.
        code (google.cloud.bigquery_storage_v1.types.RowError.RowErrorCode):
            Structured error reason for a row error.
        message (str):
            Description of the issue encountered when
            processing the row.
    """

    class RowErrorCode(proto.Enum):
        r"""Error code for ``RowError``.

        Values:
            ROW_ERROR_CODE_UNSPECIFIED (0):
                Default error.
            FIELDS_ERROR (1):
                One or more fields in the row have errors.
        """
        ROW_ERROR_CODE_UNSPECIFIED = 0
        FIELDS_ERROR = 1

    index: int = proto.Field(
        proto.INT64,
        number=1,
    )
    code: RowErrorCode = proto.Field(
        proto.ENUM,
        number=2,
        enum=RowErrorCode,
    )
    message: str = proto.Field(
        proto.STRING,
        number=3,
    )


__all__ = tuple(sorted(__protobuf__.manifest))