# engine/cursor.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: allow-untyped-defs, allow-untyped-calls

"""Define cursor-specific result set constructs including
:class:`.CursorResult`."""


from __future__ import annotations

import collections
import functools
import operator
import typing
from typing import Any
from typing import cast
from typing import ClassVar
from typing import Dict
from typing import Iterator
from typing import List
from typing import Mapping
from typing import NoReturn
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import TYPE_CHECKING
from typing import Union

from .result import IteratorResult
from .result import MergedResult
from .result import Result
from .result import ResultMetaData
from .result import SimpleResultMetaData
from .result import tuplegetter
from .row import Row
from .. import exc
from .. import util
from ..sql import elements
from ..sql import sqltypes
from ..sql import util as sql_util
from ..sql.base import _generative
from ..sql.compiler import ResultColumnsEntry
from ..sql.compiler import RM_NAME
from ..sql.compiler import RM_OBJECTS
from ..sql.compiler import RM_RENDERED_NAME
from ..sql.compiler import RM_TYPE
from ..sql.type_api import TypeEngine
from ..util import compat
from ..util.typing import Literal
from ..util.typing import Self
from ..util.typing import TupleAny
from ..util.typing import TypeVarTuple
from ..util.typing import Unpack


if typing.TYPE_CHECKING:
    from .base import Connection
    from .default import DefaultExecutionContext
    from .interfaces import _DBAPICursorDescription
    from .interfaces import DBAPICursor
    from .interfaces import Dialect
    from .interfaces import ExecutionContext
    from .result import _KeyIndexType
    from .result import _KeyMapRecType
    from .result import _KeyMapType
    from .result import _KeyType
    from .result import _ProcessorsType
    from .result import _TupleGetterType
    from ..sql.type_api import _ResultProcessorType


_Ts = TypeVarTuple("_Ts")


# metadata entry tuple indexes.
# using raw tuple is faster than namedtuple.
# these match up to the positions in
# _CursorKeyMapRecType
MD_INDEX: Literal[0] = 0
"""integer index in cursor.description

"""

MD_RESULT_MAP_INDEX: Literal[1] = 1
"""integer index in compiled._result_columns"""

MD_OBJECTS: Literal[2] = 2
"""other string keys and ColumnElement obj that can match.

This comes from compiler.RM_OBJECTS / compiler.ResultColumnsEntry.objects

"""

MD_LOOKUP_KEY: Literal[3] = 3
"""string key we usually expect for key-based lookup

this comes from compiler.RM_NAME / compiler.ResultColumnsEntry.name
"""


MD_RENDERED_NAME: Literal[4] = 4
"""name that is usually in cursor.description
this comes from compiler.RM_RENDERED_NAME / compiler.ResultColumnsEntry.keyname
"""


MD_PROCESSOR: Literal[5] = 5
"""callable to process a result value into a row"""

MD_UNTRANSLATED: Literal[6] = 6
"""raw name from cursor.description"""


_CursorKeyMapRecType = Tuple[
    Optional[int],  # MD_INDEX, None means the record is ambiguously named
    int,  # MD_RESULT_MAP_INDEX
    List[Any],  # MD_OBJECTS
    str,  # MD_LOOKUP_KEY
    str,  # MD_RENDERED_NAME
    Optional["_ResultProcessorType[Any]"],  # MD_PROCESSOR
    Optional[str],  # MD_UNTRANSLATED
]
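
# purely illustrative sketch with hypothetical values: a record for a
# column "user_id" at cursor position 0 might look like
#
#     (0, 0, [users.c.user_id, "user_id"], "user_id", "user_id",
#      <int result processor or None>, None)
#
# where each position lines up with the MD_* constants above.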

_CursorKeyMapType = Mapping["_KeyType", _CursorKeyMapRecType]

# same as _CursorKeyMapRecType except the MD_INDEX value is definitely
# not None
_NonAmbigCursorKeyMapRecType = Tuple[
    int,
    int,
    List[Any],
    str,
    str,
    Optional["_ResultProcessorType[Any]"],
    str,
]


class CursorResultMetaData(ResultMetaData):
    """Result metadata for DBAPI cursors."""

    __slots__ = (
        "_keymap",
        "_processors",
        "_keys",
        "_keymap_by_result_column_idx",
        "_tuplefilter",
        "_translated_indexes",
        "_safe_for_cache",
        "_unpickled",
        "_key_to_index",
        # don't need _unique_filters support here for now. Can be added
        # if a need arises.
    )

    _keymap: _CursorKeyMapType
    _processors: _ProcessorsType
    _keymap_by_result_column_idx: Optional[Dict[int, _KeyMapRecType]]
    _unpickled: bool
    _safe_for_cache: bool
    _translated_indexes: Optional[List[int]]

    returns_rows: ClassVar[bool] = True

    def _has_key(self, key: Any) -> bool:
        return key in self._keymap

    def _for_freeze(self) -> ResultMetaData:
        return SimpleResultMetaData(
            self._keys,
            extra=[self._keymap[key][MD_OBJECTS] for key in self._keys],
        )

    def _make_new_metadata(
        self,
        *,
        unpickled: bool,
        processors: _ProcessorsType,
        keys: Sequence[str],
        keymap: _KeyMapType,
        tuplefilter: Optional[_TupleGetterType],
        translated_indexes: Optional[List[int]],
        safe_for_cache: bool,
        keymap_by_result_column_idx: Any,
    ) -> Self:
        new_obj = self.__class__.__new__(self.__class__)
        new_obj._unpickled = unpickled
        new_obj._processors = processors
        new_obj._keys = keys
        new_obj._keymap = keymap
        new_obj._tuplefilter = tuplefilter
        new_obj._translated_indexes = translated_indexes
        new_obj._safe_for_cache = safe_for_cache
        new_obj._keymap_by_result_column_idx = keymap_by_result_column_idx
        new_obj._key_to_index = self._make_key_to_index(keymap, MD_INDEX)
        return new_obj

    def _remove_processors(self) -> Self:
        assert not self._tuplefilter
        return self._make_new_metadata(
            unpickled=self._unpickled,
            processors=[None] * len(self._processors),
            tuplefilter=None,
            translated_indexes=None,
            keymap={
                key: value[0:5] + (None,) + value[6:]
                for key, value in self._keymap.items()
            },
            keys=self._keys,
            safe_for_cache=self._safe_for_cache,
            keymap_by_result_column_idx=self._keymap_by_result_column_idx,
        )

    def _splice_horizontally(self, other: CursorResultMetaData) -> Self:
        assert not self._tuplefilter

        keymap = dict(self._keymap)
        offset = len(self._keys)
        keymap.update(
            {
                key: (
                    # int index should be None for ambiguous key
                    (
                        value[0] + offset
                        if value[0] is not None and key not in keymap
                        else None
                    ),
                    value[1] + offset,
                    *value[2:],
                )
                for key, value in other._keymap.items()
            }
        )
        return self._make_new_metadata(
            unpickled=self._unpickled,
            processors=self._processors + other._processors,  # type: ignore
            tuplefilter=None,
            translated_indexes=None,
            keys=self._keys + other._keys,  # type: ignore
            keymap=keymap,
            safe_for_cache=self._safe_for_cache,
            keymap_by_result_column_idx={
                metadata_entry[MD_RESULT_MAP_INDEX]: metadata_entry
                for metadata_entry in keymap.values()
            },
        )

    def _reduce(self, keys: Sequence[_KeyIndexType]) -> Self:
        recs = list(self._metadata_for_keys(keys))

        indexes = [rec[MD_INDEX] for rec in recs]
        new_keys: List[str] = [rec[MD_LOOKUP_KEY] for rec in recs]

        if self._translated_indexes:
            indexes = [self._translated_indexes[idx] for idx in indexes]
        tup = tuplegetter(*indexes)
        new_recs = [(index,) + rec[1:] for index, rec in enumerate(recs)]

        keymap = {rec[MD_LOOKUP_KEY]: rec for rec in new_recs}
        # TODO: need unit test for:
        # result = connection.execute("raw sql, no columns").scalars()
        # without the "or ()" it's failing because MD_OBJECTS is None
        keymap.update(
            (e, new_rec)
            for new_rec in new_recs
            for e in new_rec[MD_OBJECTS] or ()
        )

        return self._make_new_metadata(
            unpickled=self._unpickled,
            processors=self._processors,
            keys=new_keys,
            tuplefilter=tup,
            translated_indexes=indexes,
            keymap=keymap,  # type: ignore[arg-type]
            safe_for_cache=self._safe_for_cache,
            keymap_by_result_column_idx=self._keymap_by_result_column_idx,
        )

    def _adapt_to_context(self, context: ExecutionContext) -> Self:
        """When using a cached Compiled construct that has a _result_map,
        for a new statement that used the cached Compiled, we need to ensure
        the keymap has the Column objects from our new statement as keys.
        So here we rewrite keymap with new entries for the new columns
        as matched to those of the cached statement.

        """

        if not context.compiled or not context.compiled._result_columns:
            return self

        compiled_statement = context.compiled.statement
        invoked_statement = context.invoked_statement

        if TYPE_CHECKING:
            assert isinstance(invoked_statement, elements.ClauseElement)

        if compiled_statement is invoked_statement:
            return self

        assert invoked_statement is not None

        # this is the most common path for Core statements when
        # caching is used. In ORM use, this codepath is not really used
        # as the _result_disable_adapt_to_context execution option is
        # set by the ORM.

        # make a copy and add the columns from the invoked statement
        # to the result map.

        keymap_by_position = self._keymap_by_result_column_idx

        if keymap_by_position is None:
            # first retrieval from cache, this map will not be set up yet,
            # initialize lazily
            keymap_by_position = self._keymap_by_result_column_idx = {
                metadata_entry[MD_RESULT_MAP_INDEX]: metadata_entry
                for metadata_entry in self._keymap.values()
            }

        assert not self._tuplefilter
        return self._make_new_metadata(
            keymap=compat.dict_union(
                self._keymap,
                {
                    new: keymap_by_position[idx]
                    for idx, new in enumerate(
                        invoked_statement._all_selected_columns
                    )
                    if idx in keymap_by_position
                },
            ),
            unpickled=self._unpickled,
            processors=self._processors,
            tuplefilter=None,
            translated_indexes=None,
            keys=self._keys,
            safe_for_cache=self._safe_for_cache,
            keymap_by_result_column_idx=self._keymap_by_result_column_idx,
        )

    def __init__(
        self,
        parent: CursorResult[Unpack[TupleAny]],
        cursor_description: _DBAPICursorDescription,
        *,
        driver_column_names: bool = False,
    ):
        context = parent.context
        self._tuplefilter = None
        self._translated_indexes = None
        self._safe_for_cache = self._unpickled = False

        if context.result_column_struct:
            (
                result_columns,
                cols_are_ordered,
                textual_ordered,
                ad_hoc_textual,
                loose_column_name_matching,
            ) = context.result_column_struct
            num_ctx_cols = len(result_columns)
        else:
            result_columns = cols_are_ordered = (  # type: ignore
                num_ctx_cols
            ) = ad_hoc_textual = loose_column_name_matching = (
                textual_ordered
            ) = False

        # merge cursor.description with the column info
        # present in the compiled structure, if any
        raw = self._merge_cursor_description(
            context,
            cursor_description,
            result_columns,
            num_ctx_cols,
            cols_are_ordered,
            textual_ordered,
            ad_hoc_textual,
            loose_column_name_matching,
            driver_column_names,
        )
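
        # "raw" is a list of 7-tuples whose positions correspond to the
        # MD_* constants at the top of this module:
        # (MD_INDEX, MD_RESULT_MAP_INDEX, MD_OBJECTS, MD_LOOKUP_KEY,
        #  MD_RENDERED_NAME, MD_PROCESSOR, MD_UNTRANSLATED)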

        # processors in key order which are used when building up
        # a row
        self._processors = [
            metadata_entry[MD_PROCESSOR] for metadata_entry in raw
        ]

        # this is used when using this ResultMetaData in a Core-only cache
        # retrieval context. it's initialized on first cache retrieval
        # when the _result_disable_adapt_to_context execution option
        # (which the ORM generally sets) is not set.
        self._keymap_by_result_column_idx = None

        # for compiled SQL constructs, copy additional lookup keys into
        # the key lookup map, such as Column objects, labels,
        # column keys and other names
        if num_ctx_cols:
            # keymap by primary string...
            by_key = {
                metadata_entry[MD_LOOKUP_KEY]: metadata_entry
                for metadata_entry in raw
            }

            if len(by_key) != num_ctx_cols:
                # if the by-primary-string dictionary is smaller than the
                # number of columns, assume we have dupes (this check is
                # also in place if the string dictionary is bigger, as can
                # occur when '*' was used as one of the compiled columns,
                # which may or may not be suggestive of dupes); rewrite
                # dupe records with "None" for index, which results in an
                # ambiguous column exception when accessed.
                #
                # this is considered to be the less common case as it is not
                # common to have dupe column keys in a SELECT statement.
                #
                # new in 1.4: get the complete set of all possible keys,
                # strings, objects, whatever, that are dupes across two
                # different records, first.
                index_by_key: Dict[Any, Any] = {}
                dupes = set()
                for metadata_entry in raw:
                    for key in (metadata_entry[MD_RENDERED_NAME],) + (
                        metadata_entry[MD_OBJECTS] or ()
                    ):
                        idx = metadata_entry[MD_INDEX]
                        # if this key has been associated with more than one
                        # positional index, it's a dupe
                        if index_by_key.setdefault(key, idx) != idx:
                            dupes.add(key)

                # then put everything we have into the keymap excluding only
                # those keys that are dupes.
                self._keymap = {
                    obj_elem: metadata_entry
                    for metadata_entry in raw
                    if metadata_entry[MD_OBJECTS]
                    for obj_elem in metadata_entry[MD_OBJECTS]
                    if obj_elem not in dupes
                }

                # then for the dupe keys, put the "ambiguous column"
                # record into by_key.
                by_key.update(
                    {
                        key: (None, None, [], key, key, None, None)
                        for key in dupes
                    }
                )

            else:
                # no dupes - copy secondary elements from compiled
                # columns into self._keymap. this is the most common
                # codepath for Core / ORM statement executions before the
                # result metadata is cached
                self._keymap = {
                    obj_elem: metadata_entry
                    for metadata_entry in raw
                    if metadata_entry[MD_OBJECTS]
                    for obj_elem in metadata_entry[MD_OBJECTS]
                }
            # update keymap with primary string names taking
            # precedence
            self._keymap.update(by_key)
        else:
            # no compiled objects to map, just create keymap by primary string
            self._keymap = {
                metadata_entry[MD_LOOKUP_KEY]: metadata_entry
                for metadata_entry in raw
            }

        # update keymap with "translated" names.
        # the "translated" name thing has a long history:
        # 1. originally, it was used to fix an issue in very old SQLite
        #    versions prior to 3.10.0. This code is still there in the
        #    sqlite dialect.
        # 2. Next, the pyhive third party dialect started using this hook
        #    for a driver-related issue on their end.
        # 3. Most recently, the "driver_column_names" execution option has
        #    taken advantage of this hook to get raw DBAPI col names in the
        #    result keys without disrupting the usual merge process.

        if driver_column_names or (
            not num_ctx_cols and context._translate_colname
        ):
            self._keymap.update(
                {
                    metadata_entry[MD_UNTRANSLATED]: self._keymap[
                        metadata_entry[MD_LOOKUP_KEY]
                    ]
                    for metadata_entry in raw
                    if metadata_entry[MD_UNTRANSLATED]
                }
            )

        self._key_to_index = self._make_key_to_index(self._keymap, MD_INDEX)

    def _merge_cursor_description(
        self,
        context,
        cursor_description,
        result_columns,
        num_ctx_cols,
        cols_are_ordered,
        textual_ordered,
        ad_hoc_textual,
        loose_column_name_matching,
        driver_column_names,
    ):
        """Merge a cursor.description with compiled result column information.

        There are at least four separate strategies used here, selected
        depending on the type of SQL construct used to start with.

        The most common case is that of the compiled SQL expression construct,
        which generated the column names present in the raw SQL string and
        which has the identical number of columns as were reported by
        cursor.description. In this case, we assume a 1-1 positional mapping
        between the entries in cursor.description and the compiled object.
        This is also the most performant case as we disregard extracting /
        decoding the column names present in cursor.description since we
        already have the desired name we generated in the compiled SQL
        construct.

        The next common case is that of the completely raw string SQL,
        such as passed to connection.execute(). In this case we have no
        compiled construct to work with, so we extract and decode the
        names from cursor.description and index those as the primary
        result row target keys.

        The remaining fairly common case is that of the textual SQL
        that includes at least partial column information; this is when
        we use a :class:`_expression.TextualSelect` construct.
        This construct may have
        unordered or ordered column information. In the ordered case, we
        merge the cursor.description and the compiled construct's information
        positionally, and warn if there are additional description names
        present; however, we still decode the names in cursor.description
        as we don't have a guarantee that the names in the columns match
        on these. In the unordered case, we match names in cursor.description
        to that of the compiled construct based on name matching.
        In both of these cases, the cursor.description names and the column
        expression objects and names are indexed as result row target keys.

        The final case is much less common, where we have a compiled
        non-textual SQL expression construct, but the number of columns
        in cursor.description doesn't match what's in the compiled
        construct. We make the guess here that there might be textual
        column expressions in the compiled construct that themselves include
        a comma in them causing them to split. We do the same name-matching
        as with textual non-ordered columns.

        The name-matched system of merging is the same as that used by
        SQLAlchemy for all cases up through the 0.9 series. Positional
        matching for compiled SQL expressions was introduced in 1.0 as a
        major performance feature, and positional matching for textual
        :class:`_expression.TextualSelect` objects in 1.1.
        As name matching is no longer
        a common case, it was acceptable to factor it into smaller
        generator-oriented methods that are easier to understand, but incur
        slightly more performance overhead.

        """

        if (
            num_ctx_cols
            and cols_are_ordered
            and not textual_ordered
            and num_ctx_cols == len(cursor_description)
            and not driver_column_names
        ):
            self._keys = [elem[0] for elem in result_columns]
            # pure positional 1-1 case; doesn't need to read
            # the names from cursor.description

            # most common case for Core and ORM

            # this metadata is safe to
            # cache because we are guaranteed
            # to have the columns in the same order for new executions
            self._safe_for_cache = True

            return [
                (
                    idx,
                    idx,
                    rmap_entry[RM_OBJECTS],
                    rmap_entry[RM_NAME],
                    rmap_entry[RM_RENDERED_NAME],
                    context.get_result_processor(
                        rmap_entry[RM_TYPE],
                        rmap_entry[RM_RENDERED_NAME],
                        cursor_description[idx][1],
                    ),
                    None,
                )
                for idx, rmap_entry in enumerate(result_columns)
            ]
        else:
            # name-based or text-positional cases, where we need
            # to read cursor.description names

            if textual_ordered or (
                ad_hoc_textual and len(cursor_description) == num_ctx_cols
            ):
                self._safe_for_cache = not driver_column_names
                # textual positional case
                raw_iterator = self._merge_textual_cols_by_position(
                    context,
                    cursor_description,
                    result_columns,
                    driver_column_names,
                )
            elif num_ctx_cols:
                # compiled SQL with a mismatch of description cols
                # vs. compiled cols, or textual w/ unordered columns
                # the order of columns can change if the query is
                # against a "select *", so not safe to cache
                self._safe_for_cache = False
                raw_iterator = self._merge_cols_by_name(
                    context,
                    cursor_description,
                    result_columns,
                    loose_column_name_matching,
                    driver_column_names,
                )
            else:
                # no compiled SQL, just a raw string, order of columns
                # can change for "select *"
                self._safe_for_cache = False
                raw_iterator = self._merge_cols_by_none(
                    context, cursor_description, driver_column_names
                )

            return [
                (
                    idx,
                    ridx,
                    obj,
                    cursor_colname,
                    cursor_colname,
                    context.get_result_processor(
                        mapped_type, cursor_colname, coltype
                    ),
                    untranslated,
                )
                for (
                    idx,
                    ridx,
                    cursor_colname,
                    mapped_type,
                    coltype,
                    obj,
                    untranslated,
                ) in raw_iterator
            ]

    def _colnames_from_description(
        self, context, cursor_description, driver_column_names
    ):
        """Extract column names and data types from a cursor.description.

        Applies unicode decoding, column translation, "normalization",
        and case sensitivity rules to the names based on the dialect.

        """
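        # an informal summary of the generator below: each iteration yields
        # a 5-tuple
        #
        #     (idx, colname, unnormalized, untranslated, coltype)
        #
        # where "untranslated" is None unless the dialect translated the
        # column name, or driver_column_names is in use, in which case the
        # raw DBAPI name is carried through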
        dialect = context.dialect
        translate_colname = context._translate_colname
        normalize_name = (
            dialect.normalize_name if dialect.requires_name_normalize else None
        )

        untranslated = None

        for idx, rec in enumerate(cursor_description):
            colname = unnormalized = rec[0]
            coltype = rec[1]

            if translate_colname:
                # a None here for "untranslated" means "the dialect did not
                # change the column name and the untranslated case can be
                # ignored". otherwise "untranslated" is expected to be the
                # original, unchanged colname (e.g. is == to "unnormalized")
                colname, untranslated = translate_colname(colname)

                assert untranslated is None or untranslated == unnormalized

            if normalize_name:
                colname = normalize_name(colname)

            if driver_column_names:
                yield idx, colname, unnormalized, unnormalized, coltype

            else:
                yield idx, colname, unnormalized, untranslated, coltype

    def _merge_textual_cols_by_position(
        self, context, cursor_description, result_columns, driver_column_names
    ):
        num_ctx_cols = len(result_columns)

        if num_ctx_cols > len(cursor_description):
            util.warn(
                "Number of columns in textual SQL (%d) is "
                "smaller than number of columns requested (%d)"
                % (len(cursor_description), num_ctx_cols)
            )
        seen = set()

        self._keys = []

        uses_denormalize = context.dialect.requires_name_normalize
        for (
            idx,
            colname,
            unnormalized,
            untranslated,
            coltype,
        ) in self._colnames_from_description(
            context, cursor_description, driver_column_names
        ):
            if idx < num_ctx_cols:
                ctx_rec = result_columns[idx]
                obj = ctx_rec[RM_OBJECTS]
                ridx = idx
                mapped_type = ctx_rec[RM_TYPE]
                if obj[0] in seen:
                    raise exc.InvalidRequestError(
                        "Duplicate column expression requested "
                        "in textual SQL: %r" % obj[0]
                    )
                seen.add(obj[0])

                # special check for all uppercase unnormalized name;
                # use the unnormalized name as the key.
                # see #10788
                # if these names don't match, then we still honor the
                # cursor.description name as the key and not what the
                # Column has, see
                # test_resultset.py::PositionalTextTest::test_via_column
                if (
                    uses_denormalize
                    and unnormalized == ctx_rec[RM_RENDERED_NAME]
                ):
                    result_name = unnormalized
                else:
                    result_name = colname
            else:
                mapped_type = sqltypes.NULLTYPE
                obj = None
                ridx = None

                result_name = colname

            if driver_column_names:
                assert untranslated is not None
                self._keys.append(untranslated)
            else:
                self._keys.append(result_name)

            yield (
                idx,
                ridx,
                result_name,
                mapped_type,
                coltype,
                obj,
                untranslated,
            )

    def _merge_cols_by_name(
        self,
        context,
        cursor_description,
        result_columns,
        loose_column_name_matching,
        driver_column_names,
    ):
        match_map = self._create_description_match_map(
            result_columns, loose_column_name_matching
        )
        mapped_type: TypeEngine[Any]

        self._keys = []

        for (
            idx,
            colname,
            unnormalized,
            untranslated,
            coltype,
        ) in self._colnames_from_description(
            context, cursor_description, driver_column_names
        ):
            try:
                ctx_rec = match_map[colname]
            except KeyError:
                mapped_type = sqltypes.NULLTYPE
                obj = None
                result_columns_idx = None
            else:
                obj = ctx_rec[1]
                mapped_type = ctx_rec[2]
                result_columns_idx = ctx_rec[3]

            if driver_column_names:
                assert untranslated is not None
                self._keys.append(untranslated)
            else:
                self._keys.append(colname)
            yield (
                idx,
                result_columns_idx,
                colname,
                mapped_type,
                coltype,
                obj,
                untranslated,
            )

    @classmethod
    def _create_description_match_map(
        cls,
        result_columns: List[ResultColumnsEntry],
        loose_column_name_matching: bool = False,
    ) -> Dict[
        Union[str, object], Tuple[str, Tuple[Any, ...], TypeEngine[Any], int]
    ]:
        """when matching cursor.description to a set of names that are present
        in a Compiled object, as is the case with TextualSelect, get all the
        names we expect might match those in cursor.description.
        """

        d: Dict[
            Union[str, object],
            Tuple[str, Tuple[Any, ...], TypeEngine[Any], int],
        ] = {}
        for ridx, elem in enumerate(result_columns):
            key = elem[RM_RENDERED_NAME]

            if key in d:
                # conflicting keyname - just add the column-linked objects
                # to the existing record. if there is a duplicate column
                # name in the cursor description, this will allow all of those
                # objects to raise an ambiguous column error
                e_name, e_obj, e_type, e_ridx = d[key]
                d[key] = e_name, e_obj + elem[RM_OBJECTS], e_type, ridx
            else:
                d[key] = (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE], ridx)

            if loose_column_name_matching:
                # when using a textual statement with an unordered set
                # of columns that line up, we are expecting the user
                # to be using label names in the SQL that match to the column
                # expressions. Enable more liberal matching for this case;
                # duplicate keys that are ambiguous will be fixed later.
                for r_key in elem[RM_OBJECTS]:
                    d.setdefault(
                        r_key,
                        (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE], ridx),
                    )
        return d

    def _merge_cols_by_none(
        self, context, cursor_description, driver_column_names
    ):
        self._keys = []

        for (
            idx,
            colname,
            unnormalized,
            untranslated,
            coltype,
        ) in self._colnames_from_description(
            context, cursor_description, driver_column_names
        ):

            if driver_column_names:
                assert untranslated is not None
                self._keys.append(untranslated)
            else:
                self._keys.append(colname)

            yield (
                idx,
                None,
                colname,
                sqltypes.NULLTYPE,
                coltype,
                None,
                untranslated,
            )

    if not TYPE_CHECKING:

        def _key_fallback(
            self, key: Any, err: Optional[Exception], raiseerr: bool = True
        ) -> Optional[NoReturn]:
            if raiseerr:
                if self._unpickled and isinstance(key, elements.ColumnElement):
                    raise exc.NoSuchColumnError(
                        "Row was unpickled; lookup by ColumnElement "
                        "is unsupported"
                    ) from err
                else:
                    raise exc.NoSuchColumnError(
                        "Could not locate column in row for column '%s'"
                        % util.string_or_unprintable(key)
                    ) from err
            else:
                return None

    def _raise_for_ambiguous_column_name(self, rec):
        raise exc.InvalidRequestError(
            "Ambiguous column name '%s' in "
            "result set column descriptions" % rec[MD_LOOKUP_KEY]
        )

    def _index_for_key(self, key: Any, raiseerr: bool = True) -> Optional[int]:
        # TODO: can consider pre-loading ints and negative ints
        # into _keymap - also no coverage here
        if isinstance(key, int):
            key = self._keys[key]

        try:
            rec = self._keymap[key]
        except KeyError as ke:
            x = self._key_fallback(key, ke, raiseerr)
            assert x is None
            return None

        index = rec[0]

        if index is None:
            self._raise_for_ambiguous_column_name(rec)
        return index

    def _indexes_for_keys(self, keys):
        try:
            return [self._keymap[key][0] for key in keys]
        except KeyError as ke:
            # ensure it raises
            CursorResultMetaData._key_fallback(self, ke.args[0], ke)

    def _metadata_for_keys(
        self, keys: Sequence[Any]
    ) -> Iterator[_NonAmbigCursorKeyMapRecType]:
        for key in keys:
            if int in key.__class__.__mro__:
                key = self._keys[key]

            try:
                rec = self._keymap[key]
            except KeyError as ke:
                # ensure it raises
                CursorResultMetaData._key_fallback(self, ke.args[0], ke)

            index = rec[MD_INDEX]

            if index is None:
                self._raise_for_ambiguous_column_name(rec)

            yield cast(_NonAmbigCursorKeyMapRecType, rec)

    def __getstate__(self):
        # TODO: consider serializing this as SimpleResultMetaData
        return {
            "_keymap": {
                key: (
                    rec[MD_INDEX],
                    rec[MD_RESULT_MAP_INDEX],
                    [],
                    key,
                    rec[MD_RENDERED_NAME],
                    None,
                    None,
                )
                for key, rec in self._keymap.items()
                if isinstance(key, (str, int))
            },
            "_keys": self._keys,
            "_translated_indexes": self._translated_indexes,
        }

    def __setstate__(self, state):
        self._processors = [None for _ in range(len(state["_keys"]))]
        self._keymap = state["_keymap"]
        self._keymap_by_result_column_idx = None
        self._key_to_index = self._make_key_to_index(self._keymap, MD_INDEX)
        self._keys = state["_keys"]
        self._unpickled = True
        if state["_translated_indexes"]:
            self._translated_indexes = cast(
                "List[int]", state["_translated_indexes"]
            )
            self._tuplefilter = tuplegetter(*self._translated_indexes)
        else:
            self._translated_indexes = self._tuplefilter = None


class ResultFetchStrategy:
    """Define a fetching strategy for a result object.

    .. versionadded:: 1.4

    """

    __slots__ = ()

    alternate_cursor_description: Optional[_DBAPICursorDescription] = None

    def soft_close(
        self,
        result: CursorResult[Unpack[TupleAny]],
        dbapi_cursor: Optional[DBAPICursor],
    ) -> None:
        raise NotImplementedError()

    def hard_close(
        self,
        result: CursorResult[Unpack[TupleAny]],
        dbapi_cursor: Optional[DBAPICursor],
    ) -> None:
        raise NotImplementedError()

    def yield_per(
        self,
        result: CursorResult[Unpack[TupleAny]],
        dbapi_cursor: Optional[DBAPICursor],
        num: int,
    ) -> None:
        return

    def fetchone(
        self,
        result: CursorResult[Unpack[TupleAny]],
        dbapi_cursor: DBAPICursor,
        hard_close: bool = False,
    ) -> Any:
        raise NotImplementedError()

    def fetchmany(
        self,
        result: CursorResult[Unpack[TupleAny]],
        dbapi_cursor: DBAPICursor,
        size: Optional[int] = None,
    ) -> Any:
        raise NotImplementedError()

    def fetchall(
        self,
        result: CursorResult[Unpack[TupleAny]],
        dbapi_cursor: DBAPICursor,
    ) -> Any:
        raise NotImplementedError()

    def handle_exception(
        self,
        result: CursorResult[Unpack[TupleAny]],
        dbapi_cursor: Optional[DBAPICursor],
        err: BaseException,
    ) -> NoReturn:
        raise err


class NoCursorFetchStrategy(ResultFetchStrategy):
    """Cursor strategy for a result that has no open cursor.

    There are two varieties of this strategy, one for DQL and one for
    DML (and also DDL), each of which represent a result that had a cursor
    but no longer has one.

    """

    __slots__ = ()

    def soft_close(self, result, dbapi_cursor):
        pass

    def hard_close(self, result, dbapi_cursor):
        pass

    def fetchone(self, result, dbapi_cursor, hard_close=False):
        return self._non_result(result, None)

    def fetchmany(self, result, dbapi_cursor, size=None):
        return self._non_result(result, [])

    def fetchall(self, result, dbapi_cursor):
        return self._non_result(result, [])

    def _non_result(self, result, default, err=None):
        raise NotImplementedError()


class NoCursorDQLFetchStrategy(NoCursorFetchStrategy):
    """Cursor strategy for a DQL result that has no open cursor.

    This is a result set that can return rows, i.e. for a SELECT, or for an
    INSERT, UPDATE, DELETE that includes RETURNING. However it is in the state
    where the cursor is closed and no rows remain available. The owning result
    object may or may not be "hard closed", which determines if the fetch
    methods send empty results or raise for a closed result.

    """

    __slots__ = ()

    def _non_result(self, result, default, err=None):
        if result.closed:
            raise exc.ResourceClosedError(
                "This result object is closed."
            ) from err
        else:
            return default


_NO_CURSOR_DQL = NoCursorDQLFetchStrategy()


class NoCursorDMLFetchStrategy(NoCursorFetchStrategy):
    """Cursor strategy for a DML result that has no open cursor.

    This is a result set that does not return rows, i.e. for an INSERT,
    UPDATE, DELETE that does not include RETURNING.

    """

    __slots__ = ()

    def _non_result(self, result, default, err=None):
        # we only expect to have a _NoResultMetaData() here right now.
        assert not result._metadata.returns_rows
        result._metadata._we_dont_return_rows(err)


_NO_CURSOR_DML = NoCursorDMLFetchStrategy()


class CursorFetchStrategy(ResultFetchStrategy):
    """Call fetch methods from a DBAPI cursor.

    Alternate versions of this class may instead buffer the rows from
    cursors or not use cursors at all.

    """

    __slots__ = ()

    def soft_close(
        self, result: CursorResult[Any], dbapi_cursor: Optional[DBAPICursor]
    ) -> None:
        result.cursor_strategy = _NO_CURSOR_DQL

    def hard_close(
        self, result: CursorResult[Any], dbapi_cursor: Optional[DBAPICursor]
    ) -> None:
        result.cursor_strategy = _NO_CURSOR_DQL

    def handle_exception(
        self,
        result: CursorResult[Any],
        dbapi_cursor: Optional[DBAPICursor],
        err: BaseException,
    ) -> NoReturn:
        result.connection._handle_dbapi_exception(
            err, None, None, dbapi_cursor, result.context
        )

    def yield_per(
        self,
        result: CursorResult[Any],
        dbapi_cursor: Optional[DBAPICursor],
        num: int,
    ) -> None:
        result.cursor_strategy = BufferedRowCursorFetchStrategy(
            dbapi_cursor,
            {"max_row_buffer": num},
            initial_buffer=collections.deque(),
            growth_factor=0,
        )

    def fetchone(
        self,
        result: CursorResult[Any],
        dbapi_cursor: DBAPICursor,
        hard_close: bool = False,
    ) -> Any:
        try:
            row = dbapi_cursor.fetchone()
            if row is None:
                result._soft_close(hard=hard_close)
            return row
        except BaseException as e:
            self.handle_exception(result, dbapi_cursor, e)

    def fetchmany(
        self,
        result: CursorResult[Any],
        dbapi_cursor: DBAPICursor,
        size: Optional[int] = None,
    ) -> Any:
        try:
            if size is None:
                l = dbapi_cursor.fetchmany()
            else:
                l = dbapi_cursor.fetchmany(size)

            if not l:
                result._soft_close()
            return l
        except BaseException as e:
            self.handle_exception(result, dbapi_cursor, e)

    def fetchall(
        self,
        result: CursorResult[Any],
        dbapi_cursor: DBAPICursor,
    ) -> Any:
        try:
            rows = dbapi_cursor.fetchall()
            result._soft_close()
            return rows
        except BaseException as e:
            self.handle_exception(result, dbapi_cursor, e)


_DEFAULT_FETCH = CursorFetchStrategy()


class BufferedRowCursorFetchStrategy(CursorFetchStrategy):
    """A cursor fetch strategy with row buffering behavior.

    This strategy buffers the contents of a selection of rows
    before ``fetchone()`` is called. This is to allow the results of
    ``cursor.description`` to be available immediately, when
    interfacing with a DB-API that requires rows to be consumed before
    this information is available (currently psycopg2, when used with
    server-side cursors).

    The pre-fetching behavior fetches only one row initially, and then
    grows its buffer size by a fixed amount with each successive need
    for additional rows up to the ``max_row_buffer`` size, which defaults
    to 1000::

        with psycopg2_engine.connect() as conn:

            result = conn.execution_options(
                stream_results=True, max_row_buffer=50
            ).execute(text("select * from table"))

    .. versionadded:: 1.4 ``max_row_buffer`` may now exceed 1000 rows.

    .. seealso::

        :ref:`psycopg2_execution_options`
    """

    __slots__ = ("_max_row_buffer", "_rowbuffer", "_bufsize", "_growth_factor")

    def __init__(
        self,
        dbapi_cursor,
        execution_options,
        growth_factor=5,
        initial_buffer=None,
    ):
        self._max_row_buffer = execution_options.get("max_row_buffer", 1000)

        if initial_buffer is not None:
            self._rowbuffer = initial_buffer
        else:
            self._rowbuffer = collections.deque(dbapi_cursor.fetchmany(1))
        self._growth_factor = growth_factor

        if growth_factor:
            self._bufsize = min(self._max_row_buffer, self._growth_factor)
        else:
            self._bufsize = self._max_row_buffer
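
    # an illustrative sketch of the growth behavior, not a guarantee: with
    # the defaults (growth_factor=5, max_row_buffer=1000), successive calls
    # to _buffer_rows() request roughly 5, 25, 125, 625, then 1000 rows.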

    @classmethod
    def create(cls, result):
        return BufferedRowCursorFetchStrategy(
            result.cursor,
            result.context.execution_options,
        )

    def _buffer_rows(self, result, dbapi_cursor):
        """this is currently used only by fetchone()."""

        size = self._bufsize
        try:
            if size < 1:
                new_rows = dbapi_cursor.fetchall()
            else:
                new_rows = dbapi_cursor.fetchmany(size)
        except BaseException as e:
            self.handle_exception(result, dbapi_cursor, e)

        if not new_rows:
            return
        self._rowbuffer = collections.deque(new_rows)
        if self._growth_factor and size < self._max_row_buffer:
            self._bufsize = min(
                self._max_row_buffer, size * self._growth_factor
            )

    def yield_per(self, result, dbapi_cursor, num):
        self._growth_factor = 0
        self._max_row_buffer = self._bufsize = num

    def soft_close(self, result, dbapi_cursor):
        self._rowbuffer.clear()
        super().soft_close(result, dbapi_cursor)

    def hard_close(self, result, dbapi_cursor):
        self._rowbuffer.clear()
        super().hard_close(result, dbapi_cursor)

    def fetchone(self, result, dbapi_cursor, hard_close=False):
        if not self._rowbuffer:
            self._buffer_rows(result, dbapi_cursor)
            if not self._rowbuffer:
                try:
                    result._soft_close(hard=hard_close)
                except BaseException as e:
                    self.handle_exception(result, dbapi_cursor, e)
                return None
        return self._rowbuffer.popleft()

    def fetchmany(self, result, dbapi_cursor, size=None):
        if size is None:
            return self.fetchall(result, dbapi_cursor)

        rb = self._rowbuffer
        lb = len(rb)
        close = False
        if size > lb:
            try:
                new = dbapi_cursor.fetchmany(size - lb)
            except BaseException as e:
                self.handle_exception(result, dbapi_cursor, e)
            else:
                if not new:
                    # defer closing since it may clear the row buffer
                    close = True
                else:
                    rb.extend(new)

        res = [rb.popleft() for _ in range(min(size, len(rb)))]
        if close:
            result._soft_close()
        return res

    def fetchall(self, result, dbapi_cursor):
        try:
            ret = list(self._rowbuffer) + list(dbapi_cursor.fetchall())
            self._rowbuffer.clear()
            result._soft_close()
            return ret
        except BaseException as e:
            self.handle_exception(result, dbapi_cursor, e)


class FullyBufferedCursorFetchStrategy(CursorFetchStrategy):
    """A cursor strategy that buffers rows fully upon creation.

    Used for operations where a result is to be delivered
    after the database conversation cannot be continued,
    such as MSSQL INSERT...OUTPUT after an autocommit.

    """

    __slots__ = ("_rowbuffer", "alternate_cursor_description")

    def __init__(
        self, dbapi_cursor, alternate_description=None, initial_buffer=None
    ):
        self.alternate_cursor_description = alternate_description
        if initial_buffer is not None:
            self._rowbuffer = collections.deque(initial_buffer)
        else:
            self._rowbuffer = collections.deque(dbapi_cursor.fetchall())

    def yield_per(self, result, dbapi_cursor, num):
        pass

    def soft_close(self, result, dbapi_cursor):
        self._rowbuffer.clear()
        super().soft_close(result, dbapi_cursor)

    def hard_close(self, result, dbapi_cursor):
        self._rowbuffer.clear()
        super().hard_close(result, dbapi_cursor)

    def fetchone(self, result, dbapi_cursor, hard_close=False):
        if self._rowbuffer:
            return self._rowbuffer.popleft()
        else:
            result._soft_close(hard=hard_close)
            return None

    def fetchmany(self, result, dbapi_cursor, size=None):
        if size is None:
            return self.fetchall(result, dbapi_cursor)

        rb = self._rowbuffer
        rows = [rb.popleft() for _ in range(min(size, len(rb)))]
        if not rows:
            result._soft_close()
        return rows

    def fetchall(self, result, dbapi_cursor):
        ret = self._rowbuffer
        self._rowbuffer = collections.deque()
        result._soft_close()
        return ret


class _NoResultMetaData(ResultMetaData):
    __slots__ = ()

    returns_rows = False

    def _we_dont_return_rows(self, err=None):
        raise exc.ResourceClosedError(
            "This result object does not return rows. "
            "It has been closed automatically."
        ) from err

    def _index_for_key(self, keys, raiseerr):
        self._we_dont_return_rows()

    def _metadata_for_keys(self, key):
        self._we_dont_return_rows()

    def _reduce(self, keys):
        self._we_dont_return_rows()

    @property
    def _keymap(self):
        self._we_dont_return_rows()

    @property
    def _key_to_index(self):
        self._we_dont_return_rows()

    @property
    def _processors(self):
        self._we_dont_return_rows()

    @property
    def keys(self):
        self._we_dont_return_rows()


_NO_RESULT_METADATA = _NoResultMetaData()


def null_dml_result() -> IteratorResult[Any]:
    it: IteratorResult[Any] = IteratorResult(_NoResultMetaData(), iter([]))
    it._soft_close()
    return it


class CursorResult(Result[Unpack[_Ts]]):
    """A Result that is representing state from a DBAPI cursor.

    .. versionchanged:: 1.4 The :class:`.CursorResult`
       class replaces the previous :class:`.ResultProxy` interface.
       This class is based on the :class:`.Result` calling API
       which provides an updated usage model and calling facade for
       SQLAlchemy Core and SQLAlchemy ORM.

    Returns database rows via the :class:`.Row` class, which provides
    additional API features and behaviors on top of the raw data returned by
    the DBAPI. Through the use of filters such as the :meth:`.Result.scalars`
    method, other kinds of objects may also be returned.
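
    E.g., an illustrative sketch, assuming ``conn`` is a
    :class:`_engine.Connection`::

        result = conn.execute(text("select id, name from some_table"))
        for row in result:
            print(row.id, row.name)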

    .. seealso::

        :ref:`tutorial_selecting_data` - introductory material for accessing
        :class:`_engine.CursorResult` and :class:`.Row` objects.

    """

    __slots__ = (
        "context",
        "dialect",
        "cursor",
        "cursor_strategy",
        "_echo",
        "connection",
    )

    _metadata: Union[CursorResultMetaData, _NoResultMetaData]
    _no_result_metadata = _NO_RESULT_METADATA
    _soft_closed: bool = False
    closed: bool = False
    _is_cursor = True

    context: DefaultExecutionContext
    dialect: Dialect
    cursor_strategy: ResultFetchStrategy
    connection: Connection

    def __init__(
        self,
        context: DefaultExecutionContext,
        cursor_strategy: ResultFetchStrategy,
        cursor_description: Optional[_DBAPICursorDescription],
    ):
        self.context = context
        self.dialect = context.dialect
        self.cursor = context.cursor
        self.cursor_strategy = cursor_strategy
        self.connection = context.root_connection
        self._echo = echo = (
            self.connection._echo and context.engine._should_log_debug()
        )

        if cursor_description is not None:
            # inline of Result._row_getter(), set up an initial row
            # getter assuming no transformations will be called as this
            # is the most common case

            metadata = self._init_metadata(context, cursor_description)

            _make_row: Any
            _make_row = functools.partial(
                Row,
                metadata,
                metadata._effective_processors,
                metadata._key_to_index,
            )

            if context._num_sentinel_cols:
                sentinel_filter = operator.itemgetter(
                    slice(-context._num_sentinel_cols)
                )

                def _sliced_row(raw_data):
                    return _make_row(sentinel_filter(raw_data))

                sliced_row = _sliced_row
            else:
                sliced_row = _make_row

            if echo:
                log = self.context.connection._log_debug

                def _log_row(row):
                    log("Row %r", sql_util._repr_row(row))
                    return row

                self._row_logging_fn = _log_row

                def _make_row_2(row):
                    return _log_row(sliced_row(row))

                make_row = _make_row_2
            else:
                make_row = sliced_row
            self._set_memoized_attribute("_row_getter", make_row)

        else:
            assert context._num_sentinel_cols == 0
            self._metadata = self._no_result_metadata

    def _init_metadata(self, context, cursor_description):
        driver_column_names = context.execution_options.get(
            "driver_column_names", False
        )
        if context.compiled:
            compiled = context.compiled

            metadata: CursorResultMetaData

            if driver_column_names:
                metadata = CursorResultMetaData(
                    self, cursor_description, driver_column_names=True
                )
                assert not metadata._safe_for_cache
            elif compiled._cached_metadata:
                metadata = compiled._cached_metadata
            else:
                metadata = CursorResultMetaData(self, cursor_description)
                if metadata._safe_for_cache:
                    compiled._cached_metadata = metadata

            # result rewrite/ adapt step. this is to suit the case
            # when we are invoked against a cached Compiled object, we want
            # to rewrite the ResultMetaData to reflect the Column objects
            # that are in our current SQL statement object, not the one
            # that is associated with the cached Compiled object.
            # the Compiled object may also tell us to not
            # actually do this step; this is to support the ORM where
            # it is to produce a new Result object in any case, and will
            # be using the cached Column objects against this database result
            # so we don't want to rewrite them.
            #
            # Basically this step suits the use case where the end user
            # is using Core SQL expressions and is accessing columns in the
            # result row using row._mapping[table.c.column].
            if (
                not context.execution_options.get(
                    "_result_disable_adapt_to_context", False
                )
                and compiled._result_columns
                and context.cache_hit is context.dialect.CACHE_HIT
                and compiled.statement is not context.invoked_statement
            ):
                metadata = metadata._adapt_to_context(context)

            self._metadata = metadata

        else:
            self._metadata = metadata = CursorResultMetaData(
                self,
                cursor_description,
                driver_column_names=driver_column_names,
            )
        if self._echo:
            context.connection._log_debug(
                "Col %r", tuple(x[0] for x in cursor_description)
            )
        return metadata

    def _soft_close(self, hard=False):
        """Soft close this :class:`_engine.CursorResult`.

        This releases all DBAPI cursor resources, but leaves the
        CursorResult "open" from a semantic perspective, meaning the
        fetchXXX() methods will continue to return empty results.

        This method is called automatically when:

        * all result rows are exhausted using the fetchXXX() methods.
        * cursor.description is None.

        This method is **not public**, but is documented in order to clarify
        the "autoclose" process used.

        .. seealso::

            :meth:`_engine.CursorResult.close`

        """

        if (not hard and self._soft_closed) or (hard and self.closed):
            return

        if hard:
            self.closed = True
            self.cursor_strategy.hard_close(self, self.cursor)
        else:
            self.cursor_strategy.soft_close(self, self.cursor)

        if not self._soft_closed:
            cursor = self.cursor
            self.cursor = None  # type: ignore
            self.connection._safe_close_cursor(cursor)
            self._soft_closed = True

    @property
    def inserted_primary_key_rows(self):
        """Return the value of
        :attr:`_engine.CursorResult.inserted_primary_key`
        as a row contained within a list; some dialects may support a
        multiple row form as well.

        .. note:: As indicated below, in current SQLAlchemy versions this
           accessor is only useful beyond what's already supplied by
           :attr:`_engine.CursorResult.inserted_primary_key` when using the
           :ref:`postgresql_psycopg2` dialect. Future versions hope to
           generalize this feature to more dialects.

        This accessor is added to support dialects that offer the feature
        that is currently implemented by the :ref:`psycopg2_executemany_mode`
        feature, currently **only the psycopg2 dialect**, which provides
        for many rows to be INSERTed at once while still retaining the
        behavior of being able to return server-generated primary key values.

        * **When using the psycopg2 dialect, or other dialects that may support
          "fast executemany" style inserts in upcoming releases** : When
          invoking an INSERT statement while passing a list of rows as the
          second argument to :meth:`_engine.Connection.execute`, this accessor
          will then provide a list of rows, where each row contains the primary
          key value for each row that was INSERTed.

        * **When using all other dialects / backends that don't yet support
          this feature**: This accessor is only useful for **single row INSERT
          statements**, and returns the same information as that of the
          :attr:`_engine.CursorResult.inserted_primary_key` within a
          single-element list. When an INSERT statement is executed in
          conjunction with a list of rows to be INSERTed, the list will contain
          one row per row inserted in the statement, however it will contain
          ``None`` for any server-generated values.

        Future releases of SQLAlchemy will further generalize the
        "fast execution helper" feature of psycopg2 to suit other dialects,
        thus allowing this accessor to be of more general use.

        .. versionadded:: 1.4

        .. seealso::

            :attr:`_engine.CursorResult.inserted_primary_key`

        """
        if not self.context.compiled:
            raise exc.InvalidRequestError(
                "Statement is not a compiled expression construct."
            )
        elif not self.context.isinsert:
            raise exc.InvalidRequestError(
                "Statement is not an insert() expression construct."
            )
        elif self.context._is_explicit_returning:
            raise exc.InvalidRequestError(
                "Can't call inserted_primary_key "
                "when returning() "
                "is used."
            )
        return self.context.inserted_primary_key_rows

    @property
    def inserted_primary_key(self):
        """Return the primary key for the row just inserted.

        The return value is a :class:`_result.Row` object representing
        a named tuple of primary key values in the order in which the
        primary key columns are configured in the source
        :class:`_schema.Table`.

        .. versionchanged:: 1.4.8 - the
           :attr:`_engine.CursorResult.inserted_primary_key`
           value is now a named tuple via the :class:`_result.Row` class,
           rather than a plain tuple.

        This accessor only applies to single row :func:`_expression.insert`
        constructs which did not explicitly specify
        :meth:`_expression.Insert.returning`. Support for multirow inserts,
        while not yet available for most backends, would be accessed using
        the :attr:`_engine.CursorResult.inserted_primary_key_rows` accessor.

        Note that primary key columns which specify a server_default clause, or
        otherwise do not qualify as "autoincrement" columns (see the notes at
        :class:`_schema.Column`), and were generated using the database-side
        default, will appear in this list as ``None`` unless the backend
        supports "returning" and the insert statement was executed with
        "implicit returning" enabled.
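
        For example, an illustrative sketch assuming a hypothetical ``users``
        table with an autoincrement ``id`` primary key column::

            result = connection.execute(
                users.insert().values(name="spongebob")
            )
            print(result.inserted_primary_key)  # e.g. (1,)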

        Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
        statement is not a compiled expression construct
        or is not an insert() construct.

        """

        if self.context.executemany:
            raise exc.InvalidRequestError(
                "This statement was an executemany call; if primary key "
                "returning is supported, please "
                "use .inserted_primary_key_rows."
            )

        ikp = self.inserted_primary_key_rows
        if ikp:
            return ikp[0]
        else:
            return None

    def last_updated_params(self):
        """Return the collection of updated parameters from this
        execution.

        Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
        statement is not a compiled expression construct
        or is not an update() construct.

        """
        if not self.context.compiled:
            raise exc.InvalidRequestError(
                "Statement is not a compiled expression construct."
            )
        elif not self.context.isupdate:
            raise exc.InvalidRequestError(
                "Statement is not an update() expression construct."
            )
        elif self.context.executemany:
            return self.context.compiled_parameters
        else:
            return self.context.compiled_parameters[0]

    def last_inserted_params(self):
        """Return the collection of inserted parameters from this
        execution.

        Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
        statement is not a compiled expression construct
        or is not an insert() construct.

        """
        if not self.context.compiled:
            raise exc.InvalidRequestError(
                "Statement is not a compiled expression construct."
            )
        elif not self.context.isinsert:
            raise exc.InvalidRequestError(
                "Statement is not an insert() expression construct."
            )
        elif self.context.executemany:
            return self.context.compiled_parameters
        else:
            return self.context.compiled_parameters[0]

    @property
    def returned_defaults_rows(self):
        """Return a list of rows each containing the values of default
        columns that were fetched using
        the :meth:`.ValuesBase.return_defaults` feature.

        The return value is a list of :class:`.Row` objects.

        .. versionadded:: 1.4

        """
        return self.context.returned_default_rows

    def splice_horizontally(self, other):
        """Return a new :class:`.CursorResult` that "horizontally splices"
        together the rows of this :class:`.CursorResult` with that of another
        :class:`.CursorResult`.

        .. tip:: This method is for the benefit of the SQLAlchemy ORM and is
           not intended for general use.

        "horizontally splices" means that for each row in the first and second
        result sets, a new row that concatenates the two rows together is
        produced. The incoming
        :class:`.CursorResult` must have the identical number of rows. It is
        typically expected that the two result sets come from the same sort
        order as well, as the result rows are spliced together based on their
        position in the result.

        The expected use case here is so that multiple INSERT..RETURNING
        statements (which definitely need to be sorted) against different
        tables can produce a single result that looks like a JOIN of those two
        tables.

        E.g.::

            r1 = connection.execute(
                users.insert().returning(
                    users.c.user_name,
                    users.c.user_id,
                    sort_by_parameter_order=True
                ),
                user_values
            )

            r2 = connection.execute(
                addresses.insert().returning(
                    addresses.c.address_id,
                    addresses.c.address,
                    addresses.c.user_id,
                    sort_by_parameter_order=True
                ),
                address_values
            )

            rows = r1.splice_horizontally(r2).all()
            assert (
                rows ==
                [
                    ("john", 1, 1, "foo@bar.com", 1),
                    ("jack", 2, 2, "bar@bat.com", 2),
                ]
            )

        .. versionadded:: 2.0

        .. seealso::

            :meth:`.CursorResult.splice_vertically`

        """

        clone = self._generate()
        total_rows = [
            tuple(r1) + tuple(r2)
            for r1, r2 in zip(
                list(self._raw_row_iterator()),
                list(other._raw_row_iterator()),
            )
        ]

        clone._metadata = clone._metadata._splice_horizontally(other._metadata)

        clone.cursor_strategy = FullyBufferedCursorFetchStrategy(
            None,
            initial_buffer=total_rows,
        )
        clone._reset_memoizations()
        return clone

    def splice_vertically(self, other):
        """Return a new :class:`.CursorResult` that "vertically splices",
        i.e. "extends", the rows of this :class:`.CursorResult` with that of
        another :class:`.CursorResult`.

        .. tip:: This method is for the benefit of the SQLAlchemy ORM and is
           not intended for general use.

        "vertically splices" means the rows of the given result are appended to
        the rows of this cursor result. The incoming :class:`.CursorResult`
        must have rows that represent the identical list of columns in the
        identical order as they are in this :class:`.CursorResult`.
1929
1930 .. versionadded:: 2.0
1931
1932 .. seealso::
1933
1934 :meth:`.CursorResult.splice_horizontally`
1935
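        For example (a sketch assuming two results selecting the identical
        columns from a hypothetical ``users`` table)::

            r1 = connection.execute(
                users.select().where(users.c.user_id < 10)
            )
            r2 = connection.execute(
                users.select().where(users.c.user_id >= 10)
            )

            # all rows of r1, followed by all rows of r2
            rows = r1.splice_vertically(r2).all()
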
        """
        clone = self._generate()
        total_rows = list(self._raw_row_iterator()) + list(
            other._raw_row_iterator()
        )

        clone.cursor_strategy = FullyBufferedCursorFetchStrategy(
            None,
            initial_buffer=total_rows,
        )
        clone._reset_memoizations()
        return clone

    def _rewind(self, rows):
        """rewind this result back to the given rowset.

        this is used internally for the case where an :class:`.Insert`
        construct combines the use of
        :meth:`.Insert.return_defaults` along with the
        "supplemental columns" feature.

        """

        if self._echo:
            self.context.connection._log_debug(
                "CursorResult rewound %d row(s)", len(rows)
            )

        # the rows given are expected to be Row objects, so we
        # have to clear out processors which have already run on these
        # rows
        self._metadata = cast(
            CursorResultMetaData, self._metadata
        )._remove_processors()

        self.cursor_strategy = FullyBufferedCursorFetchStrategy(
            None,
            # TODO: if these are Row objects, can we save on not having to
            # re-make new Row objects out of them a second time? is that
            # what's actually happening right now? maybe look into this
            initial_buffer=rows,
        )
        self._reset_memoizations()
        return self

    @property
    def returned_defaults(self):
        """Return the values of default columns that were fetched using
        the :meth:`.ValuesBase.return_defaults` feature.

        The value is an instance of :class:`.Row`, or ``None``
        if :meth:`.ValuesBase.return_defaults` was not used or if the
        backend does not support RETURNING.

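        For example (a minimal sketch, assuming a hypothetical ``users`` table
        with a server-generated ``created_at`` column)::

            result = connection.execute(
                users.insert().return_defaults(), {"user_name": "john"}
            )

            row = result.returned_defaults
            if row is not None:
                created_at = row.created_at
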
        .. seealso::

            :meth:`.ValuesBase.return_defaults`

        """

        if self.context.executemany:
            raise exc.InvalidRequestError(
                "This statement was an executemany call; if return defaults "
                "is supported, please use .returned_defaults_rows."
            )

        rows = self.context.returned_default_rows
        if rows:
            return rows[0]
        else:
            return None

    def lastrow_has_defaults(self):
        """Return ``lastrow_has_defaults()`` from the underlying
        :class:`.ExecutionContext`.

        See :class:`.ExecutionContext` for details.

        """

        return self.context.lastrow_has_defaults()

    def postfetch_cols(self):
        """Return ``postfetch_cols()`` from the underlying
        :class:`.ExecutionContext`.

        See :class:`.ExecutionContext` for details.

        Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
        statement is not a compiled expression construct
        or is not an insert() or update() construct.

        """

        if not self.context.compiled:
            raise exc.InvalidRequestError(
                "Statement is not a compiled expression construct."
            )
        elif not self.context.isinsert and not self.context.isupdate:
            raise exc.InvalidRequestError(
                "Statement is not an insert() or update() "
                "expression construct."
            )
        return self.context.postfetch_cols

    def prefetch_cols(self):
        """Return ``prefetch_cols()`` from the underlying
        :class:`.ExecutionContext`.

        See :class:`.ExecutionContext` for details.

        Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
        statement is not a compiled expression construct
        or is not an insert() or update() construct.

        """

        if not self.context.compiled:
            raise exc.InvalidRequestError(
                "Statement is not a compiled expression construct."
            )
        elif not self.context.isinsert and not self.context.isupdate:
            raise exc.InvalidRequestError(
                "Statement is not an insert() or update() "
                "expression construct."
            )
        return self.context.prefetch_cols

    def supports_sane_rowcount(self):
        """Return ``supports_sane_rowcount`` from the dialect.

        See :attr:`_engine.CursorResult.rowcount` for background.

        """

        return self.dialect.supports_sane_rowcount

    def supports_sane_multi_rowcount(self):
        """Return ``supports_sane_multi_rowcount`` from the dialect.

        See :attr:`_engine.CursorResult.rowcount` for background.

        """

        return self.dialect.supports_sane_multi_rowcount

    @util.memoized_property
    def rowcount(self) -> int:
        """Return the 'rowcount' for this result.

        The primary purpose of 'rowcount' is to report the number of rows
        matched by the WHERE criterion of an UPDATE or DELETE statement
        executed once (i.e. for a single parameter set), which may then be
        compared to the number of rows expected to be updated or deleted as a
        means of asserting data integrity.

        This attribute is transferred from the ``cursor.rowcount`` attribute
        of the DBAPI before the cursor is closed, to support DBAPIs that
        don't make this value available after cursor close. Some DBAPIs may
        also offer meaningful values for other kinds of statements, such as
        INSERT and SELECT. To retrieve ``cursor.rowcount`` for these
        statements, set the
        :paramref:`.Connection.execution_options.preserve_rowcount`
        execution option to True, which causes the ``cursor.rowcount`` value
        to be memoized unconditionally before any results are returned or the
        cursor is closed, regardless of statement type.

        For cases where the DBAPI does not support rowcount for a particular
        kind of statement and/or execution, the returned value will be ``-1``,
        which is delivered directly from the DBAPI and is part of :pep:`249`.
        All DBAPIs should support rowcount for single-parameter-set
        UPDATE and DELETE statements, however.

        .. note::

            Notes regarding :attr:`_engine.CursorResult.rowcount`:

            * This attribute returns the number of rows *matched*,
              which is not necessarily the same as the number of rows
              that were actually *modified*. For example, an UPDATE statement
              may have no net change on a given row if the SET values
              given are the same as those present in the row already.
              Such a row would be matched but not modified.
              On backends that feature both styles, such as MySQL,
              rowcount is configured to return the match
              count in all cases.

            * :attr:`_engine.CursorResult.rowcount` in the default case is
              *only* useful in conjunction with an UPDATE or DELETE statement,
              and only with a single set of parameters. For other kinds of
              statements, SQLAlchemy will not attempt to pre-memoize the value
              unless the
              :paramref:`.Connection.execution_options.preserve_rowcount`
              execution option is used (see the example following this list).
              Note that contrary to :pep:`249`, many DBAPIs do not support
              rowcount values for statements that are not UPDATE or DELETE,
              particularly when rows are being returned which are not fully
              pre-buffered. DBAPIs that don't support rowcount for a
              particular kind of statement should return the value ``-1`` for
              such statements.

            * :attr:`_engine.CursorResult.rowcount` may not be meaningful
              when executing a single statement with multiple parameter sets
              (i.e. an :term:`executemany`). Most DBAPIs do not sum "rowcount"
              values across multiple parameter sets and will return ``-1``
              when accessed.

            * SQLAlchemy's :ref:`engine_insertmanyvalues` feature does support
              a correct population of :attr:`_engine.CursorResult.rowcount`
              when the :paramref:`.Connection.execution_options.preserve_rowcount`
              execution option is set to True.

            * Statements that use RETURNING may not support rowcount, returning
              a ``-1`` value instead.

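        For example, a minimal sketch of using ``preserve_rowcount`` to obtain
        ``cursor.rowcount`` for an INSERT (assuming a hypothetical ``users``
        table and an ``engine``)::

            with engine.connect() as connection:
                result = connection.execution_options(
                    preserve_rowcount=True
                ).execute(users.insert(), {"user_name": "john"})

                # memoized before any results are returned
                assert result.rowcount == 1
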
        .. seealso::

            :ref:`tutorial_update_delete_rowcount` - in the :ref:`unified_tutorial`

            :paramref:`.Connection.execution_options.preserve_rowcount`

        """  # noqa: E501
        try:
            return self.context.rowcount
        except BaseException as e:
            self.cursor_strategy.handle_exception(self, self.cursor, e)
            raise  # not called

    @property
    def lastrowid(self):
        """Return the 'lastrowid' accessor on the DBAPI cursor.

        This is a DBAPI-specific attribute and is only functional
        for those backends which support it, for statements
        where it is appropriate. Its behavior is not
        consistent across backends.

        Usage of this attribute is normally unnecessary when
        using insert() expression constructs; the
        :attr:`~CursorResult.inserted_primary_key` attribute provides a
        tuple of primary key values for a newly inserted row,
        regardless of database backend.

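        For example (a sketch assuming a backend such as SQLite where the
        DBAPI provides ``lastrowid``, and a hypothetical ``users`` table)::

            result = connection.execute(users.insert(), {"user_name": "john"})

            # the DBAPI cursor's rowid for the inserted row
            rowid = result.lastrowid
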
        """
        try:
            return self.context.get_lastrowid()
        except BaseException as e:
            self.cursor_strategy.handle_exception(self, self.cursor, e)

    @property
    def returns_rows(self):
        """True if this :class:`_engine.CursorResult` returns zero or more
        rows.

        I.e. if it is legal to call the methods
        :meth:`_engine.CursorResult.fetchone`,
        :meth:`_engine.CursorResult.fetchmany`, and
        :meth:`_engine.CursorResult.fetchall`.

        Overall, the value of :attr:`_engine.CursorResult.returns_rows` should
        always be synonymous with whether or not the DBAPI cursor had a
        ``.description`` attribute, indicating the presence of result columns,
        noting that a cursor that returns zero rows still has a
        ``.description`` if a row-returning statement was emitted.

        This attribute should be True for all results that are against
        SELECT statements, as well as for DML statements INSERT/UPDATE/DELETE
        that use RETURNING. For INSERT/UPDATE/DELETE statements that were
        not using RETURNING, the value will usually be False, however
        there are some dialect-specific exceptions to this, such as the
        MSSQL / pyodbc dialect, where a SELECT is emitted inline in
        order to retrieve an inserted primary key value.

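        For example, a sketch of guarding a fetch against an arbitrary
        statement (``some_statement`` is hypothetical)::

            result = connection.execute(some_statement)

            if result.returns_rows:
                rows = result.all()
            else:
                result.close()
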
        """
        return self._metadata.returns_rows

    @property
    def is_insert(self):
        """True if this :class:`_engine.CursorResult` is the result
        of executing an expression language compiled
        :func:`_expression.insert` construct.

        When True, this implies that the
        :attr:`inserted_primary_key` attribute is accessible,
        assuming the statement did not include
        a user defined "returning" construct.

        """
        return self.context.isinsert

    def _fetchiter_impl(self):
        fetchone = self.cursor_strategy.fetchone

        while True:
            row = fetchone(self, self.cursor)
            if row is None:
                break
            yield row

    def _fetchone_impl(self, hard_close=False):
        return self.cursor_strategy.fetchone(self, self.cursor, hard_close)

    def _fetchall_impl(self):
        return self.cursor_strategy.fetchall(self, self.cursor)

    def _fetchmany_impl(self, size=None):
        return self.cursor_strategy.fetchmany(self, self.cursor, size)

    def _raw_row_iterator(self):
        return self._fetchiter_impl()

    def merge(
        self, *others: Result[Unpack[TupleAny]]
    ) -> MergedResult[Unpack[TupleAny]]:
        merged_result = super().merge(*others)
        # when rowcount was memoized for this result, report the sum of
        # rowcounts across all merged results
        if self.context._has_rowcount:
            merged_result.rowcount = sum(
                cast("CursorResult[Any]", result).rowcount
                for result in (self,) + others
            )
        return merged_result

    def close(self) -> Any:
        """Close this :class:`_engine.CursorResult`.

        This closes out the underlying DBAPI cursor corresponding to the
        statement execution, if one is still present. Note that the DBAPI
        cursor is automatically released when the :class:`_engine.CursorResult`
        exhausts all available rows. :meth:`_engine.CursorResult.close` is
        generally an optional method except in the case when discarding a
        :class:`_engine.CursorResult` that still has additional rows pending
        for fetch.

        After this method is called, it is no longer valid to call upon
        the fetch methods, which will raise a :class:`.ResourceClosedError`
        on subsequent use.

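        E.g., a sketch of discarding a result that may still have rows
        pending, assuming a hypothetical ``users`` table::

            result = connection.execute(users.select())
            first_row = result.fetchone()

            # release the DBAPI cursor without fetching remaining rows
            result.close()
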
        .. seealso::

            :ref:`connections_toplevel`

        """
        self._soft_close(hard=True)

    @_generative
    def yield_per(self, num: int) -> Self:
        self._yield_per = num
        # configure the cursor strategy to fetch rows in batches of ``num``
        self.cursor_strategy.yield_per(self, self.cursor, num)
        return self


# legacy alias retained for backwards compatibility
ResultProxy = CursorResult