Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/google/cloud/bigquery_storage_v1/types/storage.py: 99%

114 statements  

# -*- coding: utf-8 -*-
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations

from typing import MutableMapping, MutableSequence

from google.protobuf import timestamp_pb2  # type: ignore
from google.protobuf import wrappers_pb2  # type: ignore
from google.rpc import status_pb2  # type: ignore
import proto  # type: ignore

from google.cloud.bigquery_storage_v1.types import arrow, avro, protobuf, stream, table

__protobuf__ = proto.module(
    package="google.cloud.bigquery.storage.v1",
    manifest={
        "CreateReadSessionRequest",
        "ReadRowsRequest",
        "ThrottleState",
        "StreamStats",
        "ReadRowsResponse",
        "SplitReadStreamRequest",
        "SplitReadStreamResponse",
        "CreateWriteStreamRequest",
        "AppendRowsRequest",
        "AppendRowsResponse",
        "GetWriteStreamRequest",
        "BatchCommitWriteStreamsRequest",
        "BatchCommitWriteStreamsResponse",
        "FinalizeWriteStreamRequest",
        "FinalizeWriteStreamResponse",
        "FlushRowsRequest",
        "FlushRowsResponse",
        "StorageError",
        "RowError",
    },
)

class CreateReadSessionRequest(proto.Message):
    r"""Request message for ``CreateReadSession``.

    Attributes:
        parent (str):
            Required. The request project that owns the session, in the
            form of ``projects/{project_id}``.
        read_session (google.cloud.bigquery_storage_v1.types.ReadSession):
            Required. Session to be created.
        max_stream_count (int):
            Max initial number of streams. If unset or zero, the server
            will provide a number of streams that produces reasonable
            throughput. Must be non-negative. The number of streams may
            be lower than the requested number, depending on the amount
            of parallelism that is reasonable for the table. There is a
            default system max limit of 1,000.

            This must be greater than or equal to
            preferred_min_stream_count. Typically, clients should either
            leave this unset to let the system determine an upper bound
            OR set this to the maximum number of "units of work" the
            client can gracefully handle.
        preferred_min_stream_count (int):
            The minimum preferred stream count. This
            parameter can be used to inform the service that
            there is a desired lower bound on the number of
            streams. This is typically a target parallelism
            of the client (e.g. a Spark cluster with
            N-workers would set this to a low multiple of N
            to ensure good cluster utilization).

            The system will make a best effort to provide at
            least this number of streams, but in some cases
            might provide less.
    """

    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    read_session: stream.ReadSession = proto.Field(
        proto.MESSAGE,
        number=2,
        message=stream.ReadSession,
    )
    max_stream_count: int = proto.Field(
        proto.INT32,
        number=3,
    )
    preferred_min_stream_count: int = proto.Field(
        proto.INT32,
        number=4,
    )
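# NOTE: The helper below is an illustrative sketch added for documentation; it
# is not part of the generated module. It assumes ``stream.ReadSession``
# exposes ``table`` and ``data_format`` fields (as in the published v1 API)
# and uses placeholder project/dataset/table names.
def _example_create_read_session_request() -> "CreateReadSessionRequest":
    """Build a minimal ``CreateReadSessionRequest`` with illustrative values."""
    read_session = stream.ReadSession(
        table="projects/my-project/datasets/my_dataset/tables/my_table",
        data_format=stream.DataFormat.ARROW,
    )
    return CreateReadSessionRequest(
        parent="projects/my-project",
        read_session=read_session,
        # Leaving max_stream_count at 0 lets the server pick a stream count.
        max_stream_count=0,
    )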

class ReadRowsRequest(proto.Message):
    r"""Request message for ``ReadRows``.

    Attributes:
        read_stream (str):
            Required. Stream to read rows from.
        offset (int):
            The offset requested must be less than the
            last row read from Read. Requesting a larger
            offset is undefined. If not specified, start
            reading from offset zero.
    """

    read_stream: str = proto.Field(
        proto.STRING,
        number=1,
    )
    offset: int = proto.Field(
        proto.INT64,
        number=2,
    )

class ThrottleState(proto.Message):
    r"""Information about whether the current connection is being
    throttled.

    Attributes:
        throttle_percent (int):
            How much this connection is being throttled.
            Zero means no throttling, 100 means fully
            throttled.
    """

    throttle_percent: int = proto.Field(
        proto.INT32,
        number=1,
    )

class StreamStats(proto.Message):
    r"""Estimated stream statistics for a given read Stream.

    Attributes:
        progress (google.cloud.bigquery_storage_v1.types.StreamStats.Progress):
            Represents the progress of the current
            stream.
    """

    class Progress(proto.Message):
        r"""

        Attributes:
            at_response_start (float):
                The fraction of rows assigned to the stream that have been
                processed by the server so far, not including the rows in
                the current response message.

                This value, along with ``at_response_end``, can be used to
                interpolate the progress made as the rows in the message are
                being processed using the following formula:
                ``at_response_start + (at_response_end - at_response_start) * rows_processed_from_response / rows_in_response``.

                Note that if a filter is provided, the ``at_response_end``
                value of the previous response may not necessarily be equal
                to the ``at_response_start`` value of the current response.
            at_response_end (float):
                Similar to ``at_response_start``, except that this value
                includes the rows in the current response.
        """

        at_response_start: float = proto.Field(
            proto.DOUBLE,
            number=1,
        )
        at_response_end: float = proto.Field(
            proto.DOUBLE,
            number=2,
        )

    progress: Progress = proto.Field(
        proto.MESSAGE,
        number=2,
        message=Progress,
    )
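# NOTE: Illustrative sketch, not part of the generated module. It applies the
# interpolation formula documented on ``StreamStats.Progress`` to estimate how
# far reading has progressed while rows of the current response are consumed.
def _example_interpolate_progress(
    progress: "StreamStats.Progress",
    rows_processed_from_response: int,
    rows_in_response: int,
) -> float:
    """Estimate the in-flight progress fraction for the current response."""
    if rows_in_response <= 0:
        return progress.at_response_start
    span = progress.at_response_end - progress.at_response_start
    return progress.at_response_start + span * (
        rows_processed_from_response / rows_in_response
    )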

class ReadRowsResponse(proto.Message):
    r"""Response from calling ``ReadRows`` may include row data, progress
    and throttling information.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        avro_rows (google.cloud.bigquery_storage_v1.types.AvroRows):
            Serialized row data in AVRO format.

            This field is a member of `oneof`_ ``rows``.
        arrow_record_batch (google.cloud.bigquery_storage_v1.types.ArrowRecordBatch):
            Serialized row data in Arrow RecordBatch
            format.

            This field is a member of `oneof`_ ``rows``.
        row_count (int):
            Number of serialized rows in the rows block.
        stats (google.cloud.bigquery_storage_v1.types.StreamStats):
            Statistics for the stream.
        throttle_state (google.cloud.bigquery_storage_v1.types.ThrottleState):
            Throttling state. If unset, the latest
            response still describes the current throttling
            status.
        avro_schema (google.cloud.bigquery_storage_v1.types.AvroSchema):
            Output only. Avro schema.

            This field is a member of `oneof`_ ``schema``.
        arrow_schema (google.cloud.bigquery_storage_v1.types.ArrowSchema):
            Output only. Arrow schema.

            This field is a member of `oneof`_ ``schema``.
        uncompressed_byte_size (int):
            Optional. If the row data in this ReadRowsResponse is
            compressed, then uncompressed_byte_size is the original size
            of the uncompressed row data. If it is set to a value
            greater than 0, decompress into a buffer of size
            uncompressed_byte_size using the compression codec that was
            requested at session creation time and that is specified in
            TableReadOptions.response_compression_codec in ReadSession.
            This value is not set if no response_compression_codec was
            requested, and it is -1 if the requested compression would
            not have reduced the size of this ReadRowsResponse's row
            data. This attempts to match Apache Arrow's behavior
            described in https://github.com/apache/arrow/issues/15102,
            where the uncompressed length may be set to -1 to indicate
            that the data that follows is not compressed, which can be
            useful for cases where compression does not yield
            appreciable savings. When uncompressed_byte_size is not
            greater than 0, the client should skip decompression.

            This field is a member of `oneof`_ ``_uncompressed_byte_size``.
    """

    avro_rows: avro.AvroRows = proto.Field(
        proto.MESSAGE,
        number=3,
        oneof="rows",
        message=avro.AvroRows,
    )
    arrow_record_batch: arrow.ArrowRecordBatch = proto.Field(
        proto.MESSAGE,
        number=4,
        oneof="rows",
        message=arrow.ArrowRecordBatch,
    )
    row_count: int = proto.Field(
        proto.INT64,
        number=6,
    )
    stats: "StreamStats" = proto.Field(
        proto.MESSAGE,
        number=2,
        message="StreamStats",
    )
    throttle_state: "ThrottleState" = proto.Field(
        proto.MESSAGE,
        number=5,
        message="ThrottleState",
    )
    avro_schema: avro.AvroSchema = proto.Field(
        proto.MESSAGE,
        number=7,
        oneof="schema",
        message=avro.AvroSchema,
    )
    arrow_schema: arrow.ArrowSchema = proto.Field(
        proto.MESSAGE,
        number=8,
        oneof="schema",
        message=arrow.ArrowSchema,
    )
    uncompressed_byte_size: int = proto.Field(
        proto.INT64,
        number=9,
        optional=True,
    )
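# NOTE: Illustrative sketch, not part of the generated module. It encodes the
# rule documented on ``uncompressed_byte_size``: only a value greater than 0
# means the row payload is compressed and should be decompressed; unset or -1
# means the payload should be used as-is.
def _example_should_decompress(response: "ReadRowsResponse") -> bool:
    """Return True when the row data in ``response`` needs decompression."""
    size = response.uncompressed_byte_size or 0
    return size > 0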

class SplitReadStreamRequest(proto.Message):
    r"""Request message for ``SplitReadStream``.

    Attributes:
        name (str):
            Required. Name of the stream to split.
        fraction (float):
            A value in the range (0.0, 1.0) that
            specifies the fractional point at which the
            original stream should be split. The actual
            split point is evaluated on pre-filtered rows,
            so if a filter is provided, then there is no
            guarantee that the division of the rows between
            the new child streams will be proportional to
            this fractional value. Additionally, because the
            server-side unit for assigning data is
            collections of rows, this fraction will always
            map to a data storage boundary on the server
            side.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    fraction: float = proto.Field(
        proto.DOUBLE,
        number=2,
    )

class SplitReadStreamResponse(proto.Message):
    r"""Response message for ``SplitReadStream``.

    Attributes:
        primary_stream (google.cloud.bigquery_storage_v1.types.ReadStream):
            Primary stream, which contains the beginning portion of
            ``original_stream``. An empty value indicates that the
            original stream can no longer be split.
        remainder_stream (google.cloud.bigquery_storage_v1.types.ReadStream):
            Remainder stream, which contains the tail of
            ``original_stream``. An empty value indicates that the
            original stream can no longer be split.
    """

    primary_stream: stream.ReadStream = proto.Field(
        proto.MESSAGE,
        number=1,
        message=stream.ReadStream,
    )
    remainder_stream: stream.ReadStream = proto.Field(
        proto.MESSAGE,
        number=2,
        message=stream.ReadStream,
    )
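# NOTE: Illustrative sketch, not part of the generated module. Per the field
# documentation, an empty ``primary_stream`` means the original stream cannot
# be split any further.
def _example_split_succeeded(response: "SplitReadStreamResponse") -> bool:
    """Return True when the split produced a usable primary stream."""
    return bool(response.primary_stream.name)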

class CreateWriteStreamRequest(proto.Message):
    r"""Request message for ``CreateWriteStream``.

    Attributes:
        parent (str):
            Required. Reference to the table to which the stream
            belongs, in the format of
            ``projects/{project}/datasets/{dataset}/tables/{table}``.
        write_stream (google.cloud.bigquery_storage_v1.types.WriteStream):
            Required. Stream to be created.
    """

    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    write_stream: stream.WriteStream = proto.Field(
        proto.MESSAGE,
        number=2,
        message=stream.WriteStream,
    )

class AppendRowsRequest(proto.Message):
    r"""Request message for ``AppendRows``.

    Because AppendRows is a bidirectional streaming RPC, certain parts
    of the AppendRowsRequest need only be specified for the first
    request before switching table destinations. You can also switch
    table destinations within the same connection for the default
    stream.

    The size of a single AppendRowsRequest must be less than 10 MB.
    Requests larger than this return an error, typically
    ``INVALID_ARGUMENT``.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        write_stream (str):
            Required. The write_stream identifies the append operation.
            It must be provided in the following scenarios:

            - In the first request to an AppendRows connection.

            - In all subsequent requests to an AppendRows connection,
              if you use the same connection to write to multiple
              tables or change the input schema for default streams.

            For explicitly created write streams, the format is:

            - ``projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}``

            For the special default stream, the format is:

            - ``projects/{project}/datasets/{dataset}/tables/{table}/streams/_default``.

            An example of a possible sequence of requests with
            write_stream fields within a single connection:

            - r1: {write_stream: stream_name_1}

            - r2: {write_stream: /*omit*/}

            - r3: {write_stream: /*omit*/}

            - r4: {write_stream: stream_name_2}

            - r5: {write_stream: stream_name_2}

            The destination changed in r4, so the write_stream field
            must be populated in all subsequent requests in this
            stream.
        offset (google.protobuf.wrappers_pb2.Int64Value):
            If present, the write is only performed if the next append
            offset is the same as the provided value. If not present,
            the write is performed at the current end of stream.
            Specifying a value for this field is not allowed when
            calling AppendRows for the '_default' stream.
        proto_rows (google.cloud.bigquery_storage_v1.types.AppendRowsRequest.ProtoData):
            Rows in proto format.

            This field is a member of `oneof`_ ``rows``.
        arrow_rows (google.cloud.bigquery_storage_v1.types.AppendRowsRequest.ArrowData):
            Rows in arrow format. This is an experimental
            feature available only to allowlisted customers.

            This field is a member of `oneof`_ ``rows``.
        trace_id (str):
            ID set by the client to annotate its identity.
            Only the setting in the initial request is
            respected.
        missing_value_interpretations (MutableMapping[str, google.cloud.bigquery_storage_v1.types.AppendRowsRequest.MissingValueInterpretation]):
            A map to indicate how to interpret missing values for some
            fields. Missing values are fields present in the user
            schema but missing in rows. The key is the field name. The
            value is the interpretation of missing values for the
            field.

            For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE}
            means all missing values in field foo are interpreted as
            NULL, and all missing values in field bar are interpreted
            as the default value of field bar in the table schema.

            If a field is not in this map and has missing values, the
            missing values in this field are interpreted as NULL.

            This field only applies to the current request; it won't
            affect other requests on the connection.

            Currently, the field name can only be a top-level column
            name; it can't be a struct field path like 'foo.bar'.
        default_missing_value_interpretation (google.cloud.bigquery_storage_v1.types.AppendRowsRequest.MissingValueInterpretation):
            Optional. Default missing value interpretation for all
            columns in the table. When a value is specified on an
            ``AppendRowsRequest``, it is applied to all requests on the
            connection from that point forward, until a subsequent
            ``AppendRowsRequest`` sets it to a different value.
            ``missing_value_interpretation`` can override
            ``default_missing_value_interpretation``. For example, if
            you want to write ``NULL`` instead of using default values
            for some columns, you can set
            ``default_missing_value_interpretation`` to
            ``DEFAULT_VALUE`` and at the same time, set
            ``missing_value_interpretations`` to ``NULL_VALUE`` on
            those columns.
    """

    class MissingValueInterpretation(proto.Enum):
        r"""An enum to indicate how to interpret missing values of fields
        that are present in the user schema but missing in rows. A
        missing value can represent a NULL or a column default value
        defined in the BigQuery table schema.

        Values:
            MISSING_VALUE_INTERPRETATION_UNSPECIFIED (0):
                Invalid missing value interpretation.
                Requests with this value will be rejected.
            NULL_VALUE (1):
                Missing value is interpreted as NULL.
            DEFAULT_VALUE (2):
                Missing value is interpreted as the column
                default value if declared in the table schema,
                NULL otherwise.
        """
        MISSING_VALUE_INTERPRETATION_UNSPECIFIED = 0
        NULL_VALUE = 1
        DEFAULT_VALUE = 2

    class ArrowData(proto.Message):
        r"""Arrow schema and data.
        Arrow format is an experimental feature available only to
        allowlisted customers.

        Attributes:
            writer_schema (google.cloud.bigquery_storage_v1.types.ArrowSchema):
                Optional. Arrow Schema used to serialize the
                data.
            rows (google.cloud.bigquery_storage_v1.types.ArrowRecordBatch):
                Required. Serialized row data in Arrow
                format.
        """

        writer_schema: arrow.ArrowSchema = proto.Field(
            proto.MESSAGE,
            number=1,
            message=arrow.ArrowSchema,
        )
        rows: arrow.ArrowRecordBatch = proto.Field(
            proto.MESSAGE,
            number=2,
            message=arrow.ArrowRecordBatch,
        )

    class ProtoData(proto.Message):
        r"""ProtoData contains the data rows and schema when constructing
        append requests.

        Attributes:
            writer_schema (google.cloud.bigquery_storage_v1.types.ProtoSchema):
                The protocol buffer schema used to serialize the data.
                Provide this value whenever:

                - You send the first request of an RPC connection.

                - You change the input schema.

                - You specify a new destination table.
            rows (google.cloud.bigquery_storage_v1.types.ProtoRows):
                Serialized row data in protobuf message
                format. Currently, the backend expects the
                serialized rows to adhere to proto2 semantics
                when appending rows, particularly with respect
                to how default values are encoded.
        """

        writer_schema: protobuf.ProtoSchema = proto.Field(
            proto.MESSAGE,
            number=1,
            message=protobuf.ProtoSchema,
        )
        rows: protobuf.ProtoRows = proto.Field(
            proto.MESSAGE,
            number=2,
            message=protobuf.ProtoRows,
        )

    write_stream: str = proto.Field(
        proto.STRING,
        number=1,
    )
    offset: wrappers_pb2.Int64Value = proto.Field(
        proto.MESSAGE,
        number=2,
        message=wrappers_pb2.Int64Value,
    )
    proto_rows: ProtoData = proto.Field(
        proto.MESSAGE,
        number=4,
        oneof="rows",
        message=ProtoData,
    )
    arrow_rows: ArrowData = proto.Field(
        proto.MESSAGE,
        number=5,
        oneof="rows",
        message=ArrowData,
    )
    trace_id: str = proto.Field(
        proto.STRING,
        number=6,
    )
    missing_value_interpretations: MutableMapping[
        str, MissingValueInterpretation
    ] = proto.MapField(
        proto.STRING,
        proto.ENUM,
        number=7,
        enum=MissingValueInterpretation,
    )
    default_missing_value_interpretation: MissingValueInterpretation = proto.Field(
        proto.ENUM,
        number=8,
        enum=MissingValueInterpretation,
    )
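# NOTE: Illustrative sketch, not part of the generated module. It builds an
# ``AppendRowsRequest`` for the special ``_default`` stream from pre-serialized
# proto rows. The table path is a placeholder; ``writer_schema`` should be
# supplied on the first request of a connection (and whenever the input schema
# changes), as described in ``ProtoData``.
def _example_append_rows_request(
    serialized_rows: MutableSequence[bytes],
    writer_schema: protobuf.ProtoSchema,
) -> "AppendRowsRequest":
    """Build an illustrative first ``AppendRowsRequest`` for the default stream."""
    return AppendRowsRequest(
        write_stream=(
            "projects/my-project/datasets/my_dataset/tables/my_table"
            "/streams/_default"
        ),
        proto_rows=AppendRowsRequest.ProtoData(
            writer_schema=writer_schema,
            rows=protobuf.ProtoRows(serialized_rows=serialized_rows),
        ),
        # Missing values in column 'foo' are written as NULL for this request.
        missing_value_interpretations={
            "foo": AppendRowsRequest.MissingValueInterpretation.NULL_VALUE,
        },
    )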

class AppendRowsResponse(proto.Message):
    r"""Response message for ``AppendRows``.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        append_result (google.cloud.bigquery_storage_v1.types.AppendRowsResponse.AppendResult):
            Result if the append is successful.

            This field is a member of `oneof`_ ``response``.
        error (google.rpc.status_pb2.Status):
            Error returned when problems were encountered. If present,
            it indicates rows were not accepted into the system. Users
            can retry or continue with other append requests within the
            same connection.

            Additional information about error signalling:

            ALREADY_EXISTS: Happens when an append specified an offset,
            and the backend already has received data at this offset.
            Typically encountered in retry scenarios, and can be
            ignored.

            OUT_OF_RANGE: Returned when the specified offset in the
            stream is beyond the current end of the stream.

            INVALID_ARGUMENT: Indicates a malformed request or data.

            ABORTED: Request processing is aborted because of prior
            failures. The request can be retried if the previous
            failure is addressed.

            INTERNAL: Indicates server side error(s) that can be
            retried.

            This field is a member of `oneof`_ ``response``.
        updated_schema (google.cloud.bigquery_storage_v1.types.TableSchema):
            If the backend detects a schema update, it is
            passed to the user so that the user can use it
            to input new types of message. It will be empty
            when no schema updates have occurred.
        row_errors (MutableSequence[google.cloud.bigquery_storage_v1.types.RowError]):
            If a request failed due to corrupted rows, no
            rows in the batch will be appended. The API will
            return row level error info, so that the caller
            can remove the bad rows and retry the request.
        write_stream (str):
            The target of the append operation. Matches the write_stream
            in the corresponding request.
    """

    class AppendResult(proto.Message):
        r"""AppendResult is returned for successful append requests.

        Attributes:
            offset (google.protobuf.wrappers_pb2.Int64Value):
                The row offset at which the last append
                occurred. The offset will not be set if
                appending using default streams.
        """

        offset: wrappers_pb2.Int64Value = proto.Field(
            proto.MESSAGE,
            number=1,
            message=wrappers_pb2.Int64Value,
        )

    append_result: AppendResult = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof="response",
        message=AppendResult,
    )
    error: status_pb2.Status = proto.Field(
        proto.MESSAGE,
        number=2,
        oneof="response",
        message=status_pb2.Status,
    )
    updated_schema: table.TableSchema = proto.Field(
        proto.MESSAGE,
        number=3,
        message=table.TableSchema,
    )
    row_errors: MutableSequence["RowError"] = proto.RepeatedField(
        proto.MESSAGE,
        number=4,
        message="RowError",
    )
    write_stream: str = proto.Field(
        proto.STRING,
        number=5,
    )
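# NOTE: Illustrative sketch, not part of the generated module. It shows one way
# to inspect the ``response`` oneof: a non-zero ``error.code`` signals failure
# (a ``google.rpc`` code of 0 means OK), otherwise ``append_result`` carries
# the offset (which stays unset, i.e. 0, when appending to the default stream).
def _example_committed_offset(response: "AppendRowsResponse") -> int:
    """Return the appended offset, raising if the append was rejected."""
    if response.error.code:
        raise RuntimeError(
            f"append failed ({response.error.code}): {response.error.message}"
        )
    return response.append_result.offset.value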

class GetWriteStreamRequest(proto.Message):
    r"""Request message for ``GetWriteStream``.

    Attributes:
        name (str):
            Required. Name of the stream to get, in the form of
            ``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``.
        view (google.cloud.bigquery_storage_v1.types.WriteStreamView):
            Indicates whether to get the full or partial
            view of the WriteStream. If not set, the basic
            view is returned.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    view: stream.WriteStreamView = proto.Field(
        proto.ENUM,
        number=3,
        enum=stream.WriteStreamView,
    )

class BatchCommitWriteStreamsRequest(proto.Message):
    r"""Request message for ``BatchCommitWriteStreams``.

    Attributes:
        parent (str):
            Required. Parent table that all the streams should belong
            to, in the form of
            ``projects/{project}/datasets/{dataset}/tables/{table}``.
        write_streams (MutableSequence[str]):
            Required. The group of streams that will be
            committed atomically.
    """

    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    write_streams: MutableSequence[str] = proto.RepeatedField(
        proto.STRING,
        number=2,
    )

class BatchCommitWriteStreamsResponse(proto.Message):
    r"""Response message for ``BatchCommitWriteStreams``.

    Attributes:
        commit_time (google.protobuf.timestamp_pb2.Timestamp):
            The time at which streams were committed, with microsecond
            granularity. This field will only exist when there are no
            stream errors. **Note**: if this field is not set, it means
            the commit was not successful.
        stream_errors (MutableSequence[google.cloud.bigquery_storage_v1.types.StorageError]):
            Stream level error if commit failed. Only
            streams with error will be in the list.
            If empty, there is no error and all streams are
            committed successfully. If non-empty, certain
            streams have errors and ZERO streams are
            committed due to the atomicity guarantee.
    """

    commit_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=1,
        message=timestamp_pb2.Timestamp,
    )
    stream_errors: MutableSequence["StorageError"] = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message="StorageError",
    )
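# NOTE: Illustrative sketch, not part of the generated module. The commit is
# atomic: any entry in ``stream_errors`` means zero streams were committed, so
# callers should treat a non-empty error list as a failed commit.
def _example_raise_on_commit_errors(
    response: "BatchCommitWriteStreamsResponse",
) -> None:
    """Raise if the batch commit reported any per-stream errors."""
    if response.stream_errors:
        details = "; ".join(
            f"{err.entity}: {err.error_message}" for err in response.stream_errors
        )
        raise RuntimeError(f"batch commit failed: {details}")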

class FinalizeWriteStreamRequest(proto.Message):
    r"""Request message for invoking ``FinalizeWriteStream``.

    Attributes:
        name (str):
            Required. Name of the stream to finalize, in the form of
            ``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )

class FinalizeWriteStreamResponse(proto.Message):
    r"""Response message for ``FinalizeWriteStream``.

    Attributes:
        row_count (int):
            Number of rows in the finalized stream.
    """

    row_count: int = proto.Field(
        proto.INT64,
        number=1,
    )

class FlushRowsRequest(proto.Message):
    r"""Request message for ``FlushRows``.

    Attributes:
        write_stream (str):
            Required. The stream that is the target of
            the flush operation.
        offset (google.protobuf.wrappers_pb2.Int64Value):
            Ending offset of the flush operation. Rows
            before this offset (including this offset) will
            be flushed.
    """

    write_stream: str = proto.Field(
        proto.STRING,
        number=1,
    )
    offset: wrappers_pb2.Int64Value = proto.Field(
        proto.MESSAGE,
        number=2,
        message=wrappers_pb2.Int64Value,
    )
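# NOTE: Illustrative sketch, not part of the generated module. The ``offset``
# field is a ``google.protobuf.Int64Value`` wrapper, so an explicit wrapper
# message is constructed here; the stream name is a placeholder.
def _example_flush_rows_request(offset: int) -> "FlushRowsRequest":
    """Build a ``FlushRowsRequest`` flushing rows up to ``offset`` inclusive."""
    return FlushRowsRequest(
        write_stream=(
            "projects/my-project/datasets/my_dataset/tables/my_table"
            "/streams/my_stream"
        ),
        offset=wrappers_pb2.Int64Value(value=offset),
    )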

class FlushRowsResponse(proto.Message):
    r"""Response message for ``FlushRows``.

    Attributes:
        offset (int):
            The rows before this offset (including this
            offset) are flushed.
    """

    offset: int = proto.Field(
        proto.INT64,
        number=1,
    )

class StorageError(proto.Message):
    r"""Structured custom BigQuery Storage error message. The error
    can be attached as error details in the returned rpc Status. In
    particular, the use of error codes allows more structured error
    handling, and reduces the need to evaluate unstructured error
    text strings.

    Attributes:
        code (google.cloud.bigquery_storage_v1.types.StorageError.StorageErrorCode):
            BigQuery Storage specific error code.
        entity (str):
            Name of the failed entity.
        error_message (str):
            Message that describes the error.
    """

    class StorageErrorCode(proto.Enum):
        r"""Error code for ``StorageError``.

        Values:
            STORAGE_ERROR_CODE_UNSPECIFIED (0):
                Default error.
            TABLE_NOT_FOUND (1):
                Table is not found in the system.
            STREAM_ALREADY_COMMITTED (2):
                Stream is already committed.
            STREAM_NOT_FOUND (3):
                Stream is not found.
            INVALID_STREAM_TYPE (4):
                Invalid Stream type.
                For example, you try to commit a stream that is
                not pending.
            INVALID_STREAM_STATE (5):
                Invalid Stream state.
                For example, you try to commit a stream that is
                not finalized or has been garbage collected.
            STREAM_FINALIZED (6):
                Stream is finalized.
            SCHEMA_MISMATCH_EXTRA_FIELDS (7):
                There is a schema mismatch caused by the user
                schema having an extra field that is not in the
                BigQuery table schema.
            OFFSET_ALREADY_EXISTS (8):
                Offset already exists.
            OFFSET_OUT_OF_RANGE (9):
                Offset out of range.
            CMEK_NOT_PROVIDED (10):
                Customer-managed encryption key (CMEK) not
                provided for CMEK-enabled data.
            INVALID_CMEK_PROVIDED (11):
                Customer-managed encryption key (CMEK) was
                incorrectly provided.
            CMEK_ENCRYPTION_ERROR (12):
                There is an encryption error while using a
                customer-managed encryption key.
            KMS_SERVICE_ERROR (13):
                Key Management Service (KMS) service returned
                an error, which can be retried.
            KMS_PERMISSION_DENIED (14):
                Permission denied while using a
                customer-managed encryption key.
        """
        STORAGE_ERROR_CODE_UNSPECIFIED = 0
        TABLE_NOT_FOUND = 1
        STREAM_ALREADY_COMMITTED = 2
        STREAM_NOT_FOUND = 3
        INVALID_STREAM_TYPE = 4
        INVALID_STREAM_STATE = 5
        STREAM_FINALIZED = 6
        SCHEMA_MISMATCH_EXTRA_FIELDS = 7
        OFFSET_ALREADY_EXISTS = 8
        OFFSET_OUT_OF_RANGE = 9
        CMEK_NOT_PROVIDED = 10
        INVALID_CMEK_PROVIDED = 11
        CMEK_ENCRYPTION_ERROR = 12
        KMS_SERVICE_ERROR = 13
        KMS_PERMISSION_DENIED = 14

    code: StorageErrorCode = proto.Field(
        proto.ENUM,
        number=1,
        enum=StorageErrorCode,
    )
    entity: str = proto.Field(
        proto.STRING,
        number=2,
    )
    error_message: str = proto.Field(
        proto.STRING,
        number=3,
    )

class RowError(proto.Message):
    r"""The message that presents row level error info in a request.

    Attributes:
        index (int):
            Index of the malformed row in the request.
        code (google.cloud.bigquery_storage_v1.types.RowError.RowErrorCode):
            Structured error reason for a row error.
        message (str):
            Description of the issue encountered when
            processing the row.
    """

    class RowErrorCode(proto.Enum):
        r"""Error code for ``RowError``.

        Values:
            ROW_ERROR_CODE_UNSPECIFIED (0):
                Default error.
            FIELDS_ERROR (1):
                One or more fields in the row have errors.
        """
        ROW_ERROR_CODE_UNSPECIFIED = 0
        FIELDS_ERROR = 1

    index: int = proto.Field(
        proto.INT64,
        number=1,
    )
    code: RowErrorCode = proto.Field(
        proto.ENUM,
        number=2,
        enum=RowErrorCode,
    )
    message: str = proto.Field(
        proto.STRING,
        number=3,
    )


__all__ = tuple(sorted(__protobuf__.manifest))