1# engine/cursor.py
2# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
3# <see AUTHORS file>
4#
5# This module is part of SQLAlchemy and is released under
6# the MIT License: https://www.opensource.org/licenses/mit-license.php
7# mypy: allow-untyped-defs, allow-untyped-calls
8
9"""Define cursor-specific result set constructs including
10:class:`.CursorResult`."""
11
12
13from __future__ import annotations
14
15import collections
16import functools
17import operator
18import typing
19from typing import Any
20from typing import cast
21from typing import ClassVar
22from typing import Dict
23from typing import Iterable
24from typing import Iterator
25from typing import List
26from typing import Mapping
27from typing import NoReturn
28from typing import Optional
29from typing import Sequence
30from typing import Tuple
31from typing import TYPE_CHECKING
32from typing import Union
33
34from .result import IteratorResult
35from .result import MergedResult
36from .result import Result
37from .result import ResultMetaData
38from .result import SimpleResultMetaData
39from .result import tuplegetter
40from .row import Row
41from .. import exc
42from .. import util
43from ..sql import elements
44from ..sql import sqltypes
45from ..sql import util as sql_util
46from ..sql.base import _generative
47from ..sql.compiler import ResultColumnsEntry
48from ..sql.compiler import RM_NAME
49from ..sql.compiler import RM_OBJECTS
50from ..sql.compiler import RM_RENDERED_NAME
51from ..sql.compiler import RM_TYPE
52from ..sql.type_api import TypeEngine
53from ..util.typing import Literal
54from ..util.typing import Self
55from ..util.typing import TupleAny
56from ..util.typing import TypeVarTuple
57from ..util.typing import Unpack
58
59
60if typing.TYPE_CHECKING:
61 from .base import Connection
62 from .default import DefaultExecutionContext
63 from .interfaces import _DBAPICursorDescription
64 from .interfaces import DBAPICursor
65 from .interfaces import Dialect
66 from .interfaces import ExecutionContext
67 from .result import _KeyIndexType
68 from .result import _KeyMapRecType
69 from .result import _KeyMapType
70 from .result import _KeyType
71 from .result import _ProcessorsType
72 from .result import _TupleGetterType
73 from ..sql.type_api import _ResultProcessorType
74
75
76_Ts = TypeVarTuple("_Ts")
77
78
79# metadata entry tuple indexes.
80# using raw tuple is faster than namedtuple.
81# these match up to the positions in
82# _CursorKeyMapRecType
83MD_INDEX: Literal[0] = 0
84"""integer index in cursor.description
85
86"""
87
88MD_RESULT_MAP_INDEX: Literal[1] = 1
89"""integer index in compiled._result_columns"""
90
91MD_OBJECTS: Literal[2] = 2
92"""other string keys and ColumnElement obj that can match.
93
94This comes from compiler.RM_OBJECTS / compiler.ResultColumnsEntry.objects
95
96"""
97
98MD_LOOKUP_KEY: Literal[3] = 3
99"""string key we usually expect for key-based lookup
100
101this comes from compiler.RM_NAME / compiler.ResultColumnsEntry.name
102"""
103
104
105MD_RENDERED_NAME: Literal[4] = 4
106"""name that is usually in cursor.description
107
108this comes from compiler.RENDERED_NAME / compiler.ResultColumnsEntry.keyname
109"""
110
111
112MD_PROCESSOR: Literal[5] = 5
113"""callable to process a result value into a row"""
114
115MD_UNTRANSLATED: Literal[6] = 6
116"""raw name from cursor.description"""
117
118
119_CursorKeyMapRecType = Tuple[
120 Optional[int], # MD_INDEX, None means the record is ambiguously named
121 int, # MD_RESULT_MAP_INDEX
122 List[Any], # MD_OBJECTS
123 str, # MD_LOOKUP_KEY
124 str, # MD_RENDERED_NAME
125 Optional["_ResultProcessorType[Any]"], # MD_PROCESSOR
126 Optional[str], # MD_UNTRANSLATED
127]
128
129_CursorKeyMapType = Mapping["_KeyType", _CursorKeyMapRecType]
130
131# same as _CursorKeyMapRecType except the MD_INDEX value is definitely
132# not None
133_NonAmbigCursorKeyMapRecType = Tuple[
134 int,
135 int,
136 List[Any],
137 str,
138 str,
139 Optional["_ResultProcessorType[Any]"],
140 str,
141]
142
143
144class CursorResultMetaData(ResultMetaData):
145 """Result metadata for DBAPI cursors."""
146
147 __slots__ = (
148 "_keymap",
149 "_processors",
150 "_keys",
151 "_keymap_by_result_column_idx",
152 "_tuplefilter",
153 "_translated_indexes",
154 "_safe_for_cache",
155 "_unpickled",
156 "_key_to_index",
157 # don't need _unique_filters support here for now. Can be added
158 # if a need arises.
159 )
160
161 _keymap: _CursorKeyMapType
162 _processors: _ProcessorsType
163 _keymap_by_result_column_idx: Optional[Dict[int, _KeyMapRecType]]
164 _unpickled: bool
165 _safe_for_cache: bool
166 _translated_indexes: Optional[List[int]]
167
168 returns_rows: ClassVar[bool] = True
169
170 def _has_key(self, key: Any) -> bool:
171 return key in self._keymap
172
173 def _for_freeze(self) -> ResultMetaData:
174 return SimpleResultMetaData(
175 self._keys,
176 extra=[self._keymap[key][MD_OBJECTS] for key in self._keys],
177 )
178
179 def _make_new_metadata(
180 self,
181 *,
182 unpickled: bool,
183 processors: _ProcessorsType,
184 keys: Sequence[str],
185 keymap: _KeyMapType,
186 tuplefilter: Optional[_TupleGetterType],
187 translated_indexes: Optional[List[int]],
188 safe_for_cache: bool,
189 keymap_by_result_column_idx: Any,
190 ) -> Self:
191 new_obj = self.__class__.__new__(self.__class__)
192 new_obj._unpickled = unpickled
193 new_obj._processors = processors
194 new_obj._keys = keys
195 new_obj._keymap = keymap
196 new_obj._tuplefilter = tuplefilter
197 new_obj._translated_indexes = translated_indexes
198 new_obj._safe_for_cache = safe_for_cache
199 new_obj._keymap_by_result_column_idx = keymap_by_result_column_idx
200 new_obj._key_to_index = self._make_key_to_index(keymap, MD_INDEX)
201 return new_obj
202
203 def _remove_processors_and_tuple_filter(self) -> Self:
204 if self._tuplefilter:
205 proc = self._tuplefilter(self._processors)
206 else:
207 proc = self._processors
208 return self._make_new_metadata(
209 unpickled=self._unpickled,
210 processors=[None] * len(proc),
211 tuplefilter=None,
212 translated_indexes=None,
213 keymap={
214 key: value[0:5] + (None,) + value[6:]
215 for key, value in self._keymap.items()
216 },
217 keys=self._keys,
218 safe_for_cache=self._safe_for_cache,
219 keymap_by_result_column_idx=self._keymap_by_result_column_idx,
220 )
221
222 def _splice_horizontally(self, other: CursorResultMetaData) -> Self:
223 keymap = dict(self._keymap)
224 offset = len(self._keys)
225 keymap.update(
226 {
227 key: (
228 # int index should be None for ambiguous key
229 (
230 value[0] + offset
231 if value[0] is not None and key not in keymap
232 else None
233 ),
234 value[1] + offset,
235 *value[2:],
236 )
237 for key, value in other._keymap.items()
238 }
239 )
240 self_tf = self._tuplefilter
241 other_tf = other._tuplefilter
242
243 proc: List[Any] = []
244 for pp, tf in [
245 (self._processors, self_tf),
246 (other._processors, other_tf),
247 ]:
248 proc.extend(pp if tf is None else tf(pp))
249
250 new_keys = [*self._keys, *other._keys]
251 assert len(proc) == len(new_keys)
252
253 return self._make_new_metadata(
254 unpickled=self._unpickled,
255 processors=proc,
256 tuplefilter=None,
257 translated_indexes=None,
258 keys=new_keys,
259 keymap=keymap,
260 safe_for_cache=self._safe_for_cache,
261 keymap_by_result_column_idx={
262 metadata_entry[MD_RESULT_MAP_INDEX]: metadata_entry
263 for metadata_entry in keymap.values()
264 },
265 )
266
267 def _reduce(self, keys: Sequence[_KeyIndexType]) -> Self:
268 recs = list(self._metadata_for_keys(keys))
269
270 indexes = [rec[MD_INDEX] for rec in recs]
271 new_keys: List[str] = [rec[MD_LOOKUP_KEY] for rec in recs]
272
273 if self._translated_indexes:
274 indexes = [self._translated_indexes[idx] for idx in indexes]
275 tup = tuplegetter(*indexes)
276 new_recs = [(index,) + rec[1:] for index, rec in enumerate(recs)]
277
278 keymap = {rec[MD_LOOKUP_KEY]: rec for rec in new_recs}
279 # TODO: need unit test for:
280 # result = connection.execute("raw sql, no columns").scalars()
281 # without the "or ()" it's failing because MD_OBJECTS is None
282 keymap.update(
283 (e, new_rec)
284 for new_rec in new_recs
285 for e in new_rec[MD_OBJECTS] or ()
286 )
287
288 return self._make_new_metadata(
289 unpickled=self._unpickled,
290 processors=self._processors,
291 keys=new_keys,
292 tuplefilter=tup,
293 translated_indexes=indexes,
294 keymap=keymap, # type: ignore[arg-type]
295 safe_for_cache=self._safe_for_cache,
296 keymap_by_result_column_idx=self._keymap_by_result_column_idx,
297 )
298
299 def _adapt_to_context(self, context: ExecutionContext) -> Self:
300 """When using a cached Compiled construct that has a _result_map,
301 for a new statement that used the cached Compiled, we need to ensure
302 the keymap has the Column objects from our new statement as keys.
303 So here we rewrite keymap with new entries for the new columns
304 as matched to those of the cached statement.
305
306 """
307
308 if not context.compiled or not context.compiled._result_columns:
309 return self
310
311 compiled_statement = context.compiled.statement
312 invoked_statement = context.invoked_statement
313
314 if TYPE_CHECKING:
315 assert isinstance(invoked_statement, elements.ClauseElement)
316
317 if compiled_statement is invoked_statement:
318 return self
319
320 assert invoked_statement is not None
321
322 # this is the most common path for Core statements when
323 # caching is used. In ORM use, this codepath is not really used
324 # as the _result_disable_adapt_to_context execution option is
325 # set by the ORM.
326
327 # make a copy and add the columns from the invoked statement
328 # to the result map.
329
330 keymap_by_position = self._keymap_by_result_column_idx
331
332 if keymap_by_position is None:
333 # first retrival from cache, this map will not be set up yet,
334 # initialize lazily
335 keymap_by_position = self._keymap_by_result_column_idx = {
336 metadata_entry[MD_RESULT_MAP_INDEX]: metadata_entry
337 for metadata_entry in self._keymap.values()
338 }
339
340 return self._make_new_metadata(
341 keymap=self._keymap
342 | {
343 new: keymap_by_position[idx]
344 for idx, new in enumerate(
345 invoked_statement._all_selected_columns
346 )
347 if idx in keymap_by_position
348 },
349 unpickled=self._unpickled,
350 processors=self._processors,
351 tuplefilter=self._tuplefilter,
352 translated_indexes=None,
353 keys=self._keys,
354 safe_for_cache=self._safe_for_cache,
355 keymap_by_result_column_idx=self._keymap_by_result_column_idx,
356 )
357
358 def __init__(
359 self,
360 parent: CursorResult[Unpack[TupleAny]],
361 cursor_description: _DBAPICursorDescription,
362 *,
363 driver_column_names: bool = False,
364 num_sentinel_cols: int = 0,
365 ):
366 context = parent.context
367 if num_sentinel_cols > 0:
368 # this is slightly faster than letting tuplegetter use the indexes
369 self._tuplefilter = tuplefilter = operator.itemgetter(
370 slice(-num_sentinel_cols)
371 )
372 cursor_description = tuplefilter(cursor_description)
373 else:
374 self._tuplefilter = tuplefilter = None
375 self._translated_indexes = None
376 self._safe_for_cache = self._unpickled = False
377
378 if context.result_column_struct:
379 (
380 result_columns,
381 cols_are_ordered,
382 textual_ordered,
383 ad_hoc_textual,
384 loose_column_name_matching,
385 ) = context.result_column_struct
386 if tuplefilter is not None:
387 result_columns = tuplefilter(result_columns)
388 num_ctx_cols = len(result_columns)
389 else:
390 result_columns = cols_are_ordered = ( # type: ignore
391 num_ctx_cols
392 ) = ad_hoc_textual = loose_column_name_matching = (
393 textual_ordered
394 ) = False
395
396 # merge cursor.description with the column info
397 # present in the compiled structure, if any
398 raw = self._merge_cursor_description(
399 context,
400 cursor_description,
401 result_columns,
402 num_ctx_cols,
403 cols_are_ordered,
404 textual_ordered,
405 ad_hoc_textual,
406 loose_column_name_matching,
407 driver_column_names,
408 )
409
410 # processors in key order which are used when building up
411 # a row
412 self._processors = [
413 metadata_entry[MD_PROCESSOR] for metadata_entry in raw
414 ]
415 if num_sentinel_cols > 0:
416 # add the number of sentinel columns since these are passed
417 # to the tuplefilters before being used
418 self._processors.extend([None] * num_sentinel_cols)
419
420 # this is used when using this ResultMetaData in a Core-only cache
421 # retrieval context. it's initialized on first cache retrieval
422 # when the _result_disable_adapt_to_context execution option
423 # (which the ORM generally sets) is not set.
424 self._keymap_by_result_column_idx = None
425
426 # for compiled SQL constructs, copy additional lookup keys into
427 # the key lookup map, such as Column objects, labels,
428 # column keys and other names
429 if num_ctx_cols:
430 # keymap by primary string...
431 by_key = {
432 metadata_entry[MD_LOOKUP_KEY]: metadata_entry
433 for metadata_entry in raw
434 }
435
436 if len(by_key) != num_ctx_cols:
437 # if by-primary-string dictionary smaller than
438 # number of columns, assume we have dupes; (this check
439 # is also in place if string dictionary is bigger, as
440 # can occur when '*' was used as one of the compiled columns,
441 # which may or may not be suggestive of dupes), rewrite
442 # dupe records with "None" for index which results in
443 # ambiguous column exception when accessed.
444 #
445 # this is considered to be the less common case as it is not
446 # common to have dupe column keys in a SELECT statement.
447 #
448 # new in 1.4: get the complete set of all possible keys,
449 # strings, objects, whatever, that are dupes across two
450 # different records, first.
451 index_by_key: Dict[Any, Any] = {}
452 dupes = set()
453 for metadata_entry in raw:
454 for key in (metadata_entry[MD_RENDERED_NAME],) + (
455 metadata_entry[MD_OBJECTS] or ()
456 ):
457 idx = metadata_entry[MD_INDEX]
458 # if this key has been associated with more than one
459 # positional index, it's a dupe
460 if index_by_key.setdefault(key, idx) != idx:
461 dupes.add(key)
462
463 # then put everything we have into the keymap excluding only
464 # those keys that are dupes.
465 self._keymap = {
466 obj_elem: metadata_entry
467 for metadata_entry in raw
468 if metadata_entry[MD_OBJECTS]
469 for obj_elem in metadata_entry[MD_OBJECTS]
470 if obj_elem not in dupes
471 }
472
473 # then for the dupe keys, put the "ambiguous column"
474 # record into by_key.
475 by_key.update(
476 {
477 key: (None, None, [], key, key, None, None)
478 for key in dupes
479 }
480 )
481
482 else:
483 # no dupes - copy secondary elements from compiled
484 # columns into self._keymap. this is the most common
485 # codepath for Core / ORM statement executions before the
486 # result metadata is cached
487 self._keymap = {
488 obj_elem: metadata_entry
489 for metadata_entry in raw
490 if metadata_entry[MD_OBJECTS]
491 for obj_elem in metadata_entry[MD_OBJECTS]
492 }
493 # update keymap with primary string names taking
494 # precedence
495 self._keymap.update(by_key)
496 else:
497 # no compiled objects to map, just create keymap by primary string
498 self._keymap = {
499 metadata_entry[MD_LOOKUP_KEY]: metadata_entry
500 for metadata_entry in raw
501 }
502
503 # update keymap with "translated" names.
504 # the "translated" name thing has a long history:
505 # 1. originally, it was used to fix an issue in very old SQLite
506 # versions prior to 3.10.0. This code is still there in the
507 # sqlite dialect.
508 # 2. Next, the pyhive third party dialect started using this hook
509 # for some driver related issue on their end.
510 # 3. Most recently, the "driver_column_names" execution option has
511 # taken advantage of this hook to get raw DBAPI col names in the
512 # result keys without disrupting the usual merge process.
513
514 if driver_column_names or (
515 not num_ctx_cols and context._translate_colname
516 ):
517 self._keymap.update(
518 {
519 metadata_entry[MD_UNTRANSLATED]: self._keymap[
520 metadata_entry[MD_LOOKUP_KEY]
521 ]
522 for metadata_entry in raw
523 if metadata_entry[MD_UNTRANSLATED]
524 }
525 )
526
527 self._key_to_index = self._make_key_to_index(self._keymap, MD_INDEX)
528
529 def _merge_cursor_description(
530 self,
531 context,
532 cursor_description,
533 result_columns,
534 num_ctx_cols,
535 cols_are_ordered,
536 textual_ordered,
537 ad_hoc_textual,
538 loose_column_name_matching,
539 driver_column_names,
540 ):
541 """Merge a cursor.description with compiled result column information.
542
543 There are at least four separate strategies used here, selected
544 depending on the type of SQL construct used to start with.
545
546 The most common case is that of the compiled SQL expression construct,
547 which generated the column names present in the raw SQL string and
548 which has the identical number of columns as were reported by
549 cursor.description. In this case, we assume a 1-1 positional mapping
550 between the entries in cursor.description and the compiled object.
551 This is also the most performant case as we disregard extracting /
552 decoding the column names present in cursor.description since we
553 already have the desired name we generated in the compiled SQL
554 construct.
555
556 The next common case is that of the completely raw string SQL,
557 such as passed to connection.execute(). In this case we have no
558 compiled construct to work with, so we extract and decode the
559 names from cursor.description and index those as the primary
560 result row target keys.
561
562 The remaining fairly common case is that of the textual SQL
563 that includes at least partial column information; this is when
564 we use a :class:`_expression.TextualSelect` construct.
565 This construct may have
566 unordered or ordered column information. In the ordered case, we
567 merge the cursor.description and the compiled construct's information
568 positionally, and warn if there are additional description names
569 present, however we still decode the names in cursor.description
570 as we don't have a guarantee that the names in the columns match
571 on these. In the unordered case, we match names in cursor.description
572 to that of the compiled construct based on name matching.
573 In both of these cases, the cursor.description names and the column
574 expression objects and names are indexed as result row target keys.
575
576 The final case is much less common, where we have a compiled
577 non-textual SQL expression construct, but the number of columns
578 in cursor.description doesn't match what's in the compiled
579 construct. We make the guess here that there might be textual
580 column expressions in the compiled construct that themselves include
581 a comma in them causing them to split. We do the same name-matching
582 as with textual non-ordered columns.
583
584 The name-matched system of merging is the same as that used by
585 SQLAlchemy for all cases up through the 0.9 series. Positional
586 matching for compiled SQL expressions was introduced in 1.0 as a
587 major performance feature, and positional matching for textual
588 :class:`_expression.TextualSelect` objects in 1.1.
589 As name matching is no longer
590 a common case, it was acceptable to factor it into smaller generator-
591 oriented methods that are easier to understand, but incur slightly
592 more performance overhead.
593
594 """
595
596 if (
597 num_ctx_cols
598 and cols_are_ordered
599 and not textual_ordered
600 and num_ctx_cols == len(cursor_description)
601 and not driver_column_names
602 ):
603 self._keys = [elem[0] for elem in result_columns]
604 # pure positional 1-1 case; doesn't need to read
605 # the names from cursor.description
606
607 # most common case for Core and ORM
608
609 # this metadata is safe to
610 # cache because we are guaranteed
611 # to have the columns in the same order for new executions
612 self._safe_for_cache = True
613
614 return [
615 (
616 idx,
617 idx,
618 rmap_entry[RM_OBJECTS],
619 rmap_entry[RM_NAME],
620 rmap_entry[RM_RENDERED_NAME],
621 context.get_result_processor(
622 rmap_entry[RM_TYPE],
623 rmap_entry[RM_RENDERED_NAME],
624 cursor_description[idx][1],
625 ),
626 None,
627 )
628 for idx, rmap_entry in enumerate(result_columns)
629 ]
630 else:
631 # name-based or text-positional cases, where we need
632 # to read cursor.description names
633
634 if textual_ordered or (
635 ad_hoc_textual and len(cursor_description) == num_ctx_cols
636 ):
637 self._safe_for_cache = not driver_column_names
638 # textual positional case
639 raw_iterator = self._merge_textual_cols_by_position(
640 context,
641 cursor_description,
642 result_columns,
643 driver_column_names,
644 )
645 elif num_ctx_cols:
646 # compiled SQL with a mismatch of description cols
647 # vs. compiled cols, or textual w/ unordered columns
648 # the order of columns can change if the query is
649 # against a "select *", so not safe to cache
650 self._safe_for_cache = False
651 raw_iterator = self._merge_cols_by_name(
652 context,
653 cursor_description,
654 result_columns,
655 loose_column_name_matching,
656 driver_column_names,
657 )
658 else:
659 # no compiled SQL, just a raw string, order of columns
660 # can change for "select *"
661 self._safe_for_cache = False
662 raw_iterator = self._merge_cols_by_none(
663 context, cursor_description, driver_column_names
664 )
665
666 return [
667 (
668 idx,
669 ridx,
670 obj,
671 cursor_colname,
672 cursor_colname,
673 context.get_result_processor(
674 mapped_type, cursor_colname, coltype
675 ),
676 untranslated,
677 )
678 for (
679 idx,
680 ridx,
681 cursor_colname,
682 mapped_type,
683 coltype,
684 obj,
685 untranslated,
686 ) in raw_iterator
687 ]
688
689 def _colnames_from_description(
690 self, context, cursor_description, driver_column_names
691 ):
692 """Extract column names and data types from a cursor.description.
693
694 Applies unicode decoding, column translation, "normalization",
695 and case sensitivity rules to the names based on the dialect.
696
697 """
698 dialect = context.dialect
699 translate_colname = context._translate_colname
700 normalize_name = (
701 dialect.normalize_name if dialect.requires_name_normalize else None
702 )
703
704 untranslated = None
705
706 for idx, rec in enumerate(cursor_description):
707 colname = unnormalized = rec[0]
708 coltype = rec[1]
709
710 if translate_colname:
711 # a None here for "untranslated" means "the dialect did not
712 # change the column name and the untranslated case can be
713 # ignored". otherwise "untranslated" is expected to be the
714 # original, unchanged colname (e.g. is == to "unnormalized")
715 colname, untranslated = translate_colname(colname)
716
717 assert untranslated is None or untranslated == unnormalized
718
719 if normalize_name:
720 colname = normalize_name(colname)
721
722 if driver_column_names:
723 yield idx, colname, unnormalized, unnormalized, coltype
724
725 else:
726 yield idx, colname, unnormalized, untranslated, coltype
727
728 def _merge_textual_cols_by_position(
729 self, context, cursor_description, result_columns, driver_column_names
730 ):
731 num_ctx_cols = len(result_columns)
732
733 if num_ctx_cols > len(cursor_description):
734 util.warn(
735 "Number of columns in textual SQL (%d) is "
736 "smaller than number of columns requested (%d)"
737 % (num_ctx_cols, len(cursor_description))
738 )
739 seen = set()
740
741 self._keys = []
742
743 uses_denormalize = context.dialect.requires_name_normalize
744 for (
745 idx,
746 colname,
747 unnormalized,
748 untranslated,
749 coltype,
750 ) in self._colnames_from_description(
751 context, cursor_description, driver_column_names
752 ):
753 if idx < num_ctx_cols:
754 ctx_rec = result_columns[idx]
755 obj = ctx_rec[RM_OBJECTS]
756 ridx = idx
757 mapped_type = ctx_rec[RM_TYPE]
758 if obj[0] in seen:
759 raise exc.InvalidRequestError(
760 "Duplicate column expression requested "
761 "in textual SQL: %r" % obj[0]
762 )
763 seen.add(obj[0])
764
765 # special check for all uppercase unnormalized name;
766 # use the unnormalized name as the key.
767 # see #10788
768 # if these names don't match, then we still honor the
769 # cursor.description name as the key and not what the
770 # Column has, see
771 # test_resultset.py::PositionalTextTest::test_via_column
772 if (
773 uses_denormalize
774 and unnormalized == ctx_rec[RM_RENDERED_NAME]
775 ):
776 result_name = unnormalized
777 else:
778 result_name = colname
779 else:
780 mapped_type = sqltypes.NULLTYPE
781 obj = None
782 ridx = None
783
784 result_name = colname
785
786 if driver_column_names:
787 assert untranslated is not None
788 self._keys.append(untranslated)
789 else:
790 self._keys.append(result_name)
791
792 yield (
793 idx,
794 ridx,
795 result_name,
796 mapped_type,
797 coltype,
798 obj,
799 untranslated,
800 )
801
802 def _merge_cols_by_name(
803 self,
804 context,
805 cursor_description,
806 result_columns,
807 loose_column_name_matching,
808 driver_column_names,
809 ):
810 match_map = self._create_description_match_map(
811 result_columns, loose_column_name_matching
812 )
813 mapped_type: TypeEngine[Any]
814
815 self._keys = []
816
817 for (
818 idx,
819 colname,
820 unnormalized,
821 untranslated,
822 coltype,
823 ) in self._colnames_from_description(
824 context, cursor_description, driver_column_names
825 ):
826 try:
827 ctx_rec = match_map[colname]
828 except KeyError:
829 mapped_type = sqltypes.NULLTYPE
830 obj = None
831 result_columns_idx = None
832 else:
833 obj = ctx_rec[1]
834 mapped_type = ctx_rec[2]
835 result_columns_idx = ctx_rec[3]
836
837 if driver_column_names:
838 assert untranslated is not None
839 self._keys.append(untranslated)
840 else:
841 self._keys.append(colname)
842 yield (
843 idx,
844 result_columns_idx,
845 colname,
846 mapped_type,
847 coltype,
848 obj,
849 untranslated,
850 )
851
852 @classmethod
853 def _create_description_match_map(
854 cls,
855 result_columns: List[ResultColumnsEntry],
856 loose_column_name_matching: bool = False,
857 ) -> Dict[
858 Union[str, object], Tuple[str, Tuple[Any, ...], TypeEngine[Any], int]
859 ]:
860 """when matching cursor.description to a set of names that are present
861 in a Compiled object, as is the case with TextualSelect, get all the
862 names we expect might match those in cursor.description.
863 """
864
865 d: Dict[
866 Union[str, object],
867 Tuple[str, Tuple[Any, ...], TypeEngine[Any], int],
868 ] = {}
869 for ridx, elem in enumerate(result_columns):
870 key = elem[RM_RENDERED_NAME]
871
872 if key in d:
873 # conflicting keyname - just add the column-linked objects
874 # to the existing record. if there is a duplicate column
875 # name in the cursor description, this will allow all of those
876 # objects to raise an ambiguous column error
877 e_name, e_obj, e_type, e_ridx = d[key]
878 d[key] = e_name, e_obj + elem[RM_OBJECTS], e_type, ridx
879 else:
880 d[key] = (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE], ridx)
881
882 if loose_column_name_matching:
883 # when using a textual statement with an unordered set
884 # of columns that line up, we are expecting the user
885 # to be using label names in the SQL that match to the column
886 # expressions. Enable more liberal matching for this case;
887 # duplicate keys that are ambiguous will be fixed later.
888 for r_key in elem[RM_OBJECTS]:
889 d.setdefault(
890 r_key,
891 (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE], ridx),
892 )
893 return d
894
895 def _merge_cols_by_none(
896 self, context, cursor_description, driver_column_names
897 ):
898 self._keys = []
899
900 for (
901 idx,
902 colname,
903 unnormalized,
904 untranslated,
905 coltype,
906 ) in self._colnames_from_description(
907 context, cursor_description, driver_column_names
908 ):
909
910 if driver_column_names:
911 assert untranslated is not None
912 self._keys.append(untranslated)
913 else:
914 self._keys.append(colname)
915
916 yield (
917 idx,
918 None,
919 colname,
920 sqltypes.NULLTYPE,
921 coltype,
922 None,
923 untranslated,
924 )
925
926 if not TYPE_CHECKING:
927
928 def _key_fallback(
929 self, key: Any, err: Optional[Exception], raiseerr: bool = True
930 ) -> Optional[NoReturn]:
931 if raiseerr:
932 if self._unpickled and isinstance(key, elements.ColumnElement):
933 raise exc.NoSuchColumnError(
934 "Row was unpickled; lookup by ColumnElement "
935 "is unsupported"
936 ) from err
937 else:
938 raise exc.NoSuchColumnError(
939 "Could not locate column in row for column '%s'"
940 % util.string_or_unprintable(key)
941 ) from err
942 else:
943 return None
944
945 def _raise_for_ambiguous_column_name(self, rec):
946 raise exc.InvalidRequestError(
947 "Ambiguous column name '%s' in "
948 "result set column descriptions" % rec[MD_LOOKUP_KEY]
949 )
950
951 def _index_for_key(self, key: Any, raiseerr: bool = True) -> Optional[int]:
952 # TODO: can consider pre-loading ints and negative ints
953 # into _keymap - also no coverage here
954 if isinstance(key, int):
955 key = self._keys[key]
956
957 try:
958 rec = self._keymap[key]
959 except KeyError as ke:
960 x = self._key_fallback(key, ke, raiseerr)
961 assert x is None
962 return None
963
964 index = rec[0]
965
966 if index is None:
967 self._raise_for_ambiguous_column_name(rec)
968 return index
969
970 def _indexes_for_keys(self, keys):
971 try:
972 return [self._keymap[key][0] for key in keys]
973 except KeyError as ke:
974 # ensure it raises
975 CursorResultMetaData._key_fallback(self, ke.args[0], ke)
976
977 def _metadata_for_keys(
978 self, keys: Sequence[Any]
979 ) -> Iterator[_NonAmbigCursorKeyMapRecType]:
980 for key in keys:
981 if isinstance(key, int):
982 key = self._keys[key]
983
984 try:
985 rec = self._keymap[key]
986 except KeyError as ke:
987 # ensure it raises
988 CursorResultMetaData._key_fallback(self, ke.args[0], ke)
989
990 index = rec[MD_INDEX]
991
992 if index is None:
993 self._raise_for_ambiguous_column_name(rec)
994
995 yield cast(_NonAmbigCursorKeyMapRecType, rec)
996
997 def __getstate__(self):
998 # TODO: consider serializing this as SimpleResultMetaData
999 return {
1000 "_keymap": {
1001 key: (
1002 rec[MD_INDEX],
1003 rec[MD_RESULT_MAP_INDEX],
1004 [],
1005 key,
1006 rec[MD_RENDERED_NAME],
1007 None,
1008 None,
1009 )
1010 for key, rec in self._keymap.items()
1011 if isinstance(key, (str, int))
1012 },
1013 "_keys": self._keys,
1014 "_translated_indexes": self._translated_indexes,
1015 }
1016
1017 def __setstate__(self, state):
1018 self._processors = [None for _ in range(len(state["_keys"]))]
1019 self._keymap = state["_keymap"]
1020 self._keymap_by_result_column_idx = None
1021 self._key_to_index = self._make_key_to_index(self._keymap, MD_INDEX)
1022 self._keys = state["_keys"]
1023 self._unpickled = True
1024 if state["_translated_indexes"]:
1025 translated_indexes: List[Any]
1026 self._translated_indexes = translated_indexes = state[
1027 "_translated_indexes"
1028 ]
1029 self._tuplefilter = tuplegetter(*translated_indexes)
1030 else:
1031 self._translated_indexes = self._tuplefilter = None
1032
1033
1034class ResultFetchStrategy:
1035 """Define a fetching strategy for a result object.
1036
1037
1038 .. versionadded:: 1.4
1039
1040 """
1041
1042 __slots__ = ()
1043
1044 alternate_cursor_description: Optional[_DBAPICursorDescription] = None
1045
1046 def soft_close(
1047 self,
1048 result: CursorResult[Unpack[TupleAny]],
1049 dbapi_cursor: Optional[DBAPICursor],
1050 ) -> None:
1051 raise NotImplementedError()
1052
1053 def hard_close(
1054 self,
1055 result: CursorResult[Unpack[TupleAny]],
1056 dbapi_cursor: Optional[DBAPICursor],
1057 ) -> None:
1058 raise NotImplementedError()
1059
1060 def yield_per(
1061 self,
1062 result: CursorResult[Unpack[TupleAny]],
1063 dbapi_cursor: Optional[DBAPICursor],
1064 num: int,
1065 ) -> None:
1066 return
1067
1068 def fetchone(
1069 self,
1070 result: CursorResult[Unpack[TupleAny]],
1071 dbapi_cursor: DBAPICursor,
1072 hard_close: bool = False,
1073 ) -> Any:
1074 raise NotImplementedError()
1075
1076 def fetchmany(
1077 self,
1078 result: CursorResult[Unpack[TupleAny]],
1079 dbapi_cursor: DBAPICursor,
1080 size: Optional[int] = None,
1081 ) -> Any:
1082 raise NotImplementedError()
1083
1084 def fetchall(
1085 self,
1086 result: CursorResult[Unpack[TupleAny]],
1087 dbapi_cursor: DBAPICursor,
1088 ) -> Any:
1089 raise NotImplementedError()
1090
1091 def handle_exception(
1092 self,
1093 result: CursorResult[Unpack[TupleAny]],
1094 dbapi_cursor: Optional[DBAPICursor],
1095 err: BaseException,
1096 ) -> NoReturn:
1097 raise err
1098
1099
1100class NoCursorFetchStrategy(ResultFetchStrategy):
1101 """Cursor strategy for a result that has no open cursor.
1102
1103 There are two varieties of this strategy, one for DQL and one for
1104 DML (and also DDL), each of which represent a result that had a cursor
1105 but no longer has one.
1106
1107 """
1108
1109 __slots__ = ()
1110
1111 def soft_close(self, result, dbapi_cursor):
1112 pass
1113
1114 def hard_close(self, result, dbapi_cursor):
1115 pass
1116
1117 def fetchone(self, result, dbapi_cursor, hard_close=False):
1118 return self._non_result(result, None)
1119
1120 def fetchmany(self, result, dbapi_cursor, size=None):
1121 return self._non_result(result, [])
1122
1123 def fetchall(self, result, dbapi_cursor):
1124 return self._non_result(result, [])
1125
1126 def _non_result(self, result, default, err=None):
1127 raise NotImplementedError()
1128
1129
1130class NoCursorDQLFetchStrategy(NoCursorFetchStrategy):
1131 """Cursor strategy for a DQL result that has no open cursor.
1132
1133 This is a result set that can return rows, i.e. for a SELECT, or for an
1134 INSERT, UPDATE, DELETE that includes RETURNING. However it is in the state
1135 where the cursor is closed and no rows remain available. The owning result
1136 object may or may not be "hard closed", which determines if the fetch
1137 methods send empty results or raise for closed result.
1138
1139 """
1140
1141 __slots__ = ()
1142
1143 def _non_result(self, result, default, err=None):
1144 if result.closed:
1145 raise exc.ResourceClosedError(
1146 "This result object is closed."
1147 ) from err
1148 else:
1149 return default
1150
1151
1152_NO_CURSOR_DQL = NoCursorDQLFetchStrategy()
1153
1154
1155class NoCursorDMLFetchStrategy(NoCursorFetchStrategy):
1156 """Cursor strategy for a DML result that has no open cursor.
1157
1158 This is a result set that does not return rows, i.e. for an INSERT,
1159 UPDATE, DELETE that does not include RETURNING.
1160
1161 """
1162
1163 __slots__ = ()
1164
1165 def _non_result(self, result, default, err=None):
1166 # we only expect to have a _NoResultMetaData() here right now.
1167 assert not result._metadata.returns_rows
1168 result._metadata._we_dont_return_rows(err)
1169
1170
1171_NO_CURSOR_DML = NoCursorDMLFetchStrategy()
1172
1173
1174class CursorFetchStrategy(ResultFetchStrategy):
1175 """Call fetch methods from a DBAPI cursor.
1176
1177 Alternate versions of this class may instead buffer the rows from
1178 cursors or not use cursors at all.
1179
1180 """
1181
1182 __slots__ = ()
1183
1184 def soft_close(
1185 self, result: CursorResult[Any], dbapi_cursor: Optional[DBAPICursor]
1186 ) -> None:
1187 result.cursor_strategy = _NO_CURSOR_DQL
1188
1189 def hard_close(
1190 self, result: CursorResult[Any], dbapi_cursor: Optional[DBAPICursor]
1191 ) -> None:
1192 result.cursor_strategy = _NO_CURSOR_DQL
1193
1194 def handle_exception(
1195 self,
1196 result: CursorResult[Any],
1197 dbapi_cursor: Optional[DBAPICursor],
1198 err: BaseException,
1199 ) -> NoReturn:
1200 result.connection._handle_dbapi_exception(
1201 err, None, None, dbapi_cursor, result.context
1202 )
1203
1204 def yield_per(
1205 self,
1206 result: CursorResult[Any],
1207 dbapi_cursor: Optional[DBAPICursor],
1208 num: int,
1209 ) -> None:
1210 result.cursor_strategy = BufferedRowCursorFetchStrategy(
1211 dbapi_cursor,
1212 {"max_row_buffer": num},
1213 initial_buffer=collections.deque(),
1214 growth_factor=0,
1215 )
1216
1217 def fetchone(
1218 self,
1219 result: CursorResult[Any],
1220 dbapi_cursor: DBAPICursor,
1221 hard_close: bool = False,
1222 ) -> Any:
1223 try:
1224 row = dbapi_cursor.fetchone()
1225 if row is None:
1226 result._soft_close(hard=hard_close)
1227 return row
1228 except BaseException as e:
1229 self.handle_exception(result, dbapi_cursor, e)
1230
1231 def fetchmany(
1232 self,
1233 result: CursorResult[Any],
1234 dbapi_cursor: DBAPICursor,
1235 size: Optional[int] = None,
1236 ) -> Any:
1237 try:
1238 if size is None:
1239 l = dbapi_cursor.fetchmany()
1240 else:
1241 l = dbapi_cursor.fetchmany(size)
1242
1243 if not l:
1244 result._soft_close()
1245 return l
1246 except BaseException as e:
1247 self.handle_exception(result, dbapi_cursor, e)
1248
1249 def fetchall(
1250 self,
1251 result: CursorResult[Any],
1252 dbapi_cursor: DBAPICursor,
1253 ) -> Any:
1254 try:
1255 rows = dbapi_cursor.fetchall()
1256 result._soft_close()
1257 return rows
1258 except BaseException as e:
1259 self.handle_exception(result, dbapi_cursor, e)
1260
1261
1262_DEFAULT_FETCH = CursorFetchStrategy()
1263
1264
1265class BufferedRowCursorFetchStrategy(CursorFetchStrategy):
1266 """A cursor fetch strategy with row buffering behavior.
1267
1268 This strategy buffers the contents of a selection of rows
1269 before ``fetchone()`` is called. This is to allow the results of
1270 ``cursor.description`` to be available immediately, when
1271 interfacing with a DB-API that requires rows to be consumed before
1272 this information is available (currently psycopg2, when used with
1273 server-side cursors).
1274
1275 The pre-fetching behavior fetches only one row initially, and then
1276 grows its buffer size by a fixed amount with each successive need
1277 for additional rows up the ``max_row_buffer`` size, which defaults
1278 to 1000::
1279
1280 with psycopg2_engine.connect() as conn:
1281
1282 result = conn.execution_options(
1283 stream_results=True, max_row_buffer=50
1284 ).execute(text("select * from table"))
1285
1286 .. versionadded:: 1.4 ``max_row_buffer`` may now exceed 1000 rows.
1287
1288 .. seealso::
1289
1290 :ref:`psycopg2_execution_options`
1291 """
1292
1293 __slots__ = ("_max_row_buffer", "_rowbuffer", "_bufsize", "_growth_factor")
1294
1295 def __init__(
1296 self,
1297 dbapi_cursor,
1298 execution_options,
1299 growth_factor=5,
1300 initial_buffer=None,
1301 ):
1302 self._max_row_buffer = execution_options.get("max_row_buffer", 1000)
1303
1304 if initial_buffer is not None:
1305 self._rowbuffer = initial_buffer
1306 else:
1307 self._rowbuffer = collections.deque(dbapi_cursor.fetchmany(1))
1308 self._growth_factor = growth_factor
1309
1310 if growth_factor:
1311 self._bufsize = min(self._max_row_buffer, self._growth_factor)
1312 else:
1313 self._bufsize = self._max_row_buffer
1314
1315 @classmethod
1316 def create(cls, result):
1317 return BufferedRowCursorFetchStrategy(
1318 result.cursor,
1319 result.context.execution_options,
1320 )
1321
1322 def _buffer_rows(self, result, dbapi_cursor):
1323 """this is currently used only by fetchone()."""
1324
1325 size = self._bufsize
1326 try:
1327 if size < 1:
1328 new_rows = dbapi_cursor.fetchall()
1329 else:
1330 new_rows = dbapi_cursor.fetchmany(size)
1331 except BaseException as e:
1332 self.handle_exception(result, dbapi_cursor, e)
1333
1334 if not new_rows:
1335 return
1336 self._rowbuffer = collections.deque(new_rows)
1337 if self._growth_factor and size < self._max_row_buffer:
1338 self._bufsize = min(
1339 self._max_row_buffer, size * self._growth_factor
1340 )
1341
1342 def yield_per(self, result, dbapi_cursor, num):
1343 self._growth_factor = 0
1344 self._max_row_buffer = self._bufsize = num
1345
1346 def soft_close(self, result, dbapi_cursor):
1347 self._rowbuffer.clear()
1348 super().soft_close(result, dbapi_cursor)
1349
1350 def hard_close(self, result, dbapi_cursor):
1351 self._rowbuffer.clear()
1352 super().hard_close(result, dbapi_cursor)
1353
1354 def fetchone(self, result, dbapi_cursor, hard_close=False):
1355 if not self._rowbuffer:
1356 self._buffer_rows(result, dbapi_cursor)
1357 if not self._rowbuffer:
1358 try:
1359 result._soft_close(hard=hard_close)
1360 except BaseException as e:
1361 self.handle_exception(result, dbapi_cursor, e)
1362 return None
1363 return self._rowbuffer.popleft()
1364
1365 def fetchmany(self, result, dbapi_cursor, size=None):
1366 if size is None:
1367 return self.fetchall(result, dbapi_cursor)
1368
1369 rb = self._rowbuffer
1370 lb = len(rb)
1371 close = False
1372 if size > lb:
1373 try:
1374 new = dbapi_cursor.fetchmany(size - lb)
1375 except BaseException as e:
1376 self.handle_exception(result, dbapi_cursor, e)
1377 else:
1378 if not new:
1379 # defer closing since it may clear the row buffer
1380 close = True
1381 else:
1382 rb.extend(new)
1383
1384 res = [rb.popleft() for _ in range(min(size, len(rb)))]
1385 if close:
1386 result._soft_close()
1387 return res
1388
1389 def fetchall(self, result, dbapi_cursor):
1390 try:
1391 ret = list(self._rowbuffer) + list(dbapi_cursor.fetchall())
1392 self._rowbuffer.clear()
1393 result._soft_close()
1394 return ret
1395 except BaseException as e:
1396 self.handle_exception(result, dbapi_cursor, e)
1397
1398
1399class FullyBufferedCursorFetchStrategy(CursorFetchStrategy):
1400 """A cursor strategy that buffers rows fully upon creation.
1401
1402 Used for operations where a result is to be delivered
1403 after the database conversation can not be continued,
1404 such as MSSQL INSERT...OUTPUT after an autocommit.
1405
1406 """
1407
1408 __slots__ = ("_rowbuffer", "alternate_cursor_description")
1409
1410 def __init__(
1411 self,
1412 dbapi_cursor: Optional[DBAPICursor],
1413 alternate_description: Optional[_DBAPICursorDescription] = None,
1414 initial_buffer: Optional[Iterable[Any]] = None,
1415 ):
1416 self.alternate_cursor_description = alternate_description
1417 if initial_buffer is not None:
1418 self._rowbuffer = collections.deque(initial_buffer)
1419 else:
1420 assert dbapi_cursor is not None
1421 self._rowbuffer = collections.deque(dbapi_cursor.fetchall())
1422
1423 def yield_per(self, result, dbapi_cursor, num):
1424 pass
1425
1426 def soft_close(self, result, dbapi_cursor):
1427 self._rowbuffer.clear()
1428 super().soft_close(result, dbapi_cursor)
1429
1430 def hard_close(self, result, dbapi_cursor):
1431 self._rowbuffer.clear()
1432 super().hard_close(result, dbapi_cursor)
1433
1434 def fetchone(self, result, dbapi_cursor, hard_close=False):
1435 if self._rowbuffer:
1436 return self._rowbuffer.popleft()
1437 else:
1438 result._soft_close(hard=hard_close)
1439 return None
1440
1441 def fetchmany(self, result, dbapi_cursor, size=None):
1442 if size is None:
1443 return self.fetchall(result, dbapi_cursor)
1444
1445 rb = self._rowbuffer
1446 rows = [rb.popleft() for _ in range(min(size, len(rb)))]
1447 if not rows:
1448 result._soft_close()
1449 return rows
1450
1451 def fetchall(self, result, dbapi_cursor):
1452 ret = self._rowbuffer
1453 self._rowbuffer = collections.deque()
1454 result._soft_close()
1455 return ret
1456
1457
1458class _NoResultMetaData(ResultMetaData):
1459 __slots__ = ()
1460
1461 returns_rows = False
1462
1463 def _we_dont_return_rows(self, err=None):
1464 raise exc.ResourceClosedError(
1465 "This result object does not return rows. "
1466 "It has been closed automatically."
1467 ) from err
1468
1469 def _index_for_key(self, keys, raiseerr):
1470 self._we_dont_return_rows()
1471
1472 def _metadata_for_keys(self, key):
1473 self._we_dont_return_rows()
1474
1475 def _reduce(self, keys):
1476 self._we_dont_return_rows()
1477
1478 @property
1479 def _keymap(self): # type: ignore[override]
1480 self._we_dont_return_rows()
1481
1482 @property
1483 def _key_to_index(self): # type: ignore[override]
1484 self._we_dont_return_rows()
1485
1486 @property
1487 def _processors(self): # type: ignore[override]
1488 self._we_dont_return_rows()
1489
1490 @property
1491 def keys(self):
1492 self._we_dont_return_rows()
1493
1494
1495_NO_RESULT_METADATA = _NoResultMetaData()
1496
1497
1498def null_dml_result() -> IteratorResult[Any]:
1499 it: IteratorResult[Any] = IteratorResult(_NoResultMetaData(), iter([]))
1500 it._soft_close()
1501 return it
1502
1503
1504class CursorResult(Result[Unpack[_Ts]]):
1505 """A Result that is representing state from a DBAPI cursor.
1506
1507 .. versionchanged:: 1.4 The :class:`.CursorResult``
1508 class replaces the previous :class:`.ResultProxy` interface.
1509 This classes are based on the :class:`.Result` calling API
1510 which provides an updated usage model and calling facade for
1511 SQLAlchemy Core and SQLAlchemy ORM.
1512
1513 Returns database rows via the :class:`.Row` class, which provides
1514 additional API features and behaviors on top of the raw data returned by
1515 the DBAPI. Through the use of filters such as the :meth:`.Result.scalars`
1516 method, other kinds of objects may also be returned.
1517
1518 .. seealso::
1519
1520 :ref:`tutorial_selecting_data` - introductory material for accessing
1521 :class:`_engine.CursorResult` and :class:`.Row` objects.
1522
1523 """
1524
1525 __slots__ = (
1526 "context",
1527 "dialect",
1528 "cursor",
1529 "cursor_strategy",
1530 "_echo",
1531 "connection",
1532 )
1533
1534 _metadata: Union[CursorResultMetaData, _NoResultMetaData]
1535 _no_result_metadata = _NO_RESULT_METADATA
1536 _soft_closed: bool = False
1537 closed: bool = False
1538 _is_cursor = True
1539
1540 context: DefaultExecutionContext
1541 dialect: Dialect
1542 cursor_strategy: ResultFetchStrategy
1543 connection: Connection
1544
1545 def __init__(
1546 self,
1547 context: DefaultExecutionContext,
1548 cursor_strategy: ResultFetchStrategy,
1549 cursor_description: Optional[_DBAPICursorDescription],
1550 ):
1551 self.context = context
1552 self.dialect = context.dialect
1553 self.cursor = context.cursor
1554 self.cursor_strategy = cursor_strategy
1555 self.connection = context.root_connection
1556 self._echo = echo = (
1557 self.connection._echo and context.engine._should_log_debug()
1558 )
1559
1560 if cursor_description is not None:
1561 # inline of Result._row_getter(), set up an initial row
1562 # getter assuming no transformations will be called as this
1563 # is the most common case
1564
1565 metadata = self._init_metadata(context, cursor_description)
1566
1567 _make_row: Any
1568 proc = metadata._effective_processors
1569 tf = metadata._tuplefilter
1570 _make_row = functools.partial(
1571 Row,
1572 metadata,
1573 proc if tf is None or proc is None else tf(proc),
1574 metadata._key_to_index,
1575 )
1576 if tf is not None:
1577 _fixed_tf = tf # needed to make mypy happy...
1578
1579 def _sliced_row(raw_data):
1580 return _make_row(_fixed_tf(raw_data))
1581
1582 sliced_row = _sliced_row
1583 else:
1584 sliced_row = _make_row
1585
1586 if echo:
1587 log = self.context.connection._log_debug
1588
1589 def _log_row(row):
1590 log("Row %r", sql_util._repr_row(row))
1591 return row
1592
1593 self._row_logging_fn = _log_row
1594
1595 def _make_row_2(row):
1596 return _log_row(sliced_row(row))
1597
1598 make_row = _make_row_2
1599 else:
1600 make_row = sliced_row
1601 self._set_memoized_attribute("_row_getter", make_row)
1602
1603 else:
1604 assert context._num_sentinel_cols == 0
1605 self._metadata = self._no_result_metadata
1606
1607 def _init_metadata(
1608 self,
1609 context: DefaultExecutionContext,
1610 cursor_description: _DBAPICursorDescription,
1611 ) -> CursorResultMetaData:
1612 driver_column_names = context.execution_options.get(
1613 "driver_column_names", False
1614 )
1615 if context.compiled:
1616 compiled = context.compiled
1617
1618 metadata: CursorResultMetaData
1619
1620 if driver_column_names:
1621 # TODO: test this case
1622 metadata = CursorResultMetaData(
1623 self,
1624 cursor_description,
1625 driver_column_names=True,
1626 num_sentinel_cols=context._num_sentinel_cols,
1627 )
1628 assert not metadata._safe_for_cache
1629 elif compiled._cached_metadata:
1630 metadata = compiled._cached_metadata
1631 else:
1632 metadata = CursorResultMetaData(
1633 self,
1634 cursor_description,
1635 # the number of sentinel columns is stored on the context
1636 # but it's a characteristic of the compiled object
1637 # so it's ok to apply it to a cacheable metadata.
1638 num_sentinel_cols=context._num_sentinel_cols,
1639 )
1640 if metadata._safe_for_cache:
1641 compiled._cached_metadata = metadata
1642
1643 # result rewrite/ adapt step. this is to suit the case
1644 # when we are invoked against a cached Compiled object, we want
1645 # to rewrite the ResultMetaData to reflect the Column objects
1646 # that are in our current SQL statement object, not the one
1647 # that is associated with the cached Compiled object.
1648 # the Compiled object may also tell us to not
1649 # actually do this step; this is to support the ORM where
1650 # it is to produce a new Result object in any case, and will
1651 # be using the cached Column objects against this database result
1652 # so we don't want to rewrite them.
1653 #
1654 # Basically this step suits the use case where the end user
1655 # is using Core SQL expressions and is accessing columns in the
1656 # result row using row._mapping[table.c.column].
1657 if (
1658 not context.execution_options.get(
1659 "_result_disable_adapt_to_context", False
1660 )
1661 and compiled._result_columns
1662 and context.cache_hit is context.dialect.CACHE_HIT
1663 and compiled.statement is not context.invoked_statement # type: ignore[comparison-overlap] # noqa: E501
1664 ):
1665 metadata = metadata._adapt_to_context(context)
1666
1667 self._metadata = metadata
1668
1669 else:
1670 self._metadata = metadata = CursorResultMetaData(
1671 self,
1672 cursor_description,
1673 driver_column_names=driver_column_names,
1674 )
1675 if self._echo:
1676 context.connection._log_debug(
1677 "Col %r", tuple(x[0] for x in cursor_description)
1678 )
1679 return metadata
1680
1681 def _soft_close(self, hard=False):
1682 """Soft close this :class:`_engine.CursorResult`.
1683
1684 This releases all DBAPI cursor resources, but leaves the
1685 CursorResult "open" from a semantic perspective, meaning the
1686 fetchXXX() methods will continue to return empty results.
1687
1688 This method is called automatically when:
1689
1690 * all result rows are exhausted using the fetchXXX() methods.
1691 * cursor.description is None.
1692
1693 This method is **not public**, but is documented in order to clarify
1694 the "autoclose" process used.
1695
1696 .. seealso::
1697
1698 :meth:`_engine.CursorResult.close`
1699
1700
1701 """
1702
1703 if (not hard and self._soft_closed) or (hard and self.closed):
1704 return
1705
1706 if hard:
1707 self.closed = True
1708 self.cursor_strategy.hard_close(self, self.cursor)
1709 else:
1710 self.cursor_strategy.soft_close(self, self.cursor)
1711
1712 if not self._soft_closed:
1713 cursor = self.cursor
1714 self.cursor = None # type: ignore
1715 self.connection._safe_close_cursor(cursor)
1716 self._soft_closed = True
1717
1718 @property
1719 def inserted_primary_key_rows(self):
1720 """Return the value of
1721 :attr:`_engine.CursorResult.inserted_primary_key`
1722 as a row contained within a list; some dialects may support a
1723 multiple row form as well.
1724
1725 .. note:: As indicated below, in current SQLAlchemy versions this
1726 accessor is only useful beyond what's already supplied by
1727 :attr:`_engine.CursorResult.inserted_primary_key` when using the
1728 :ref:`postgresql_psycopg2` dialect. Future versions hope to
1729 generalize this feature to more dialects.
1730
1731 This accessor is added to support dialects that offer the feature
1732 that is currently implemented by the :ref:`psycopg2_executemany_mode`
1733 feature, currently **only the psycopg2 dialect**, which provides
1734 for many rows to be INSERTed at once while still retaining the
1735 behavior of being able to return server-generated primary key values.
1736
1737 * **When using the psycopg2 dialect, or other dialects that may support
1738 "fast executemany" style inserts in upcoming releases** : When
1739 invoking an INSERT statement while passing a list of rows as the
1740 second argument to :meth:`_engine.Connection.execute`, this accessor
1741 will then provide a list of rows, where each row contains the primary
1742 key value for each row that was INSERTed.
1743
1744 * **When using all other dialects / backends that don't yet support
1745 this feature**: This accessor is only useful for **single row INSERT
1746 statements**, and returns the same information as that of the
1747 :attr:`_engine.CursorResult.inserted_primary_key` within a
1748 single-element list. When an INSERT statement is executed in
1749 conjunction with a list of rows to be INSERTed, the list will contain
1750 one row per row inserted in the statement, however it will contain
1751 ``None`` for any server-generated values.
1752
1753 Future releases of SQLAlchemy will further generalize the
1754 "fast execution helper" feature of psycopg2 to suit other dialects,
1755 thus allowing this accessor to be of more general use.
1756
1757 .. versionadded:: 1.4
1758
1759 .. seealso::
1760
1761 :attr:`_engine.CursorResult.inserted_primary_key`
1762
1763 """
1764 if not self.context.compiled:
1765 raise exc.InvalidRequestError(
1766 "Statement is not a compiled expression construct."
1767 )
1768 elif not self.context.isinsert:
1769 raise exc.InvalidRequestError(
1770 "Statement is not an insert() expression construct."
1771 )
1772 elif self.context._is_explicit_returning:
1773 raise exc.InvalidRequestError(
1774 "Can't call inserted_primary_key "
1775 "when returning() "
1776 "is used."
1777 )
1778 return self.context.inserted_primary_key_rows
1779
1780 @property
1781 def inserted_primary_key(self):
1782 """Return the primary key for the row just inserted.
1783
1784 The return value is a :class:`_result.Row` object representing
1785 a named tuple of primary key values in the order in which the
1786 primary key columns are configured in the source
1787 :class:`_schema.Table`.
1788
1789 .. versionchanged:: 1.4.8 - the
1790 :attr:`_engine.CursorResult.inserted_primary_key`
1791 value is now a named tuple via the :class:`_result.Row` class,
1792 rather than a plain tuple.
1793
1794 This accessor only applies to single row :func:`_expression.insert`
1795 constructs which did not explicitly specify
1796 :meth:`_expression.Insert.returning`. Support for multirow inserts,
1797 while not yet available for most backends, would be accessed using
1798 the :attr:`_engine.CursorResult.inserted_primary_key_rows` accessor.
1799
1800 Note that primary key columns which specify a server_default clause, or
1801 otherwise do not qualify as "autoincrement" columns (see the notes at
1802 :class:`_schema.Column`), and were generated using the database-side
1803 default, will appear in this list as ``None`` unless the backend
1804 supports "returning" and the insert statement executed with the
1805 "implicit returning" enabled.
1806
1807 Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
1808 statement is not a compiled expression construct
1809 or is not an insert() construct.
1810
1811 """
1812
1813 if self.context.executemany:
1814 raise exc.InvalidRequestError(
1815 "This statement was an executemany call; if primary key "
1816 "returning is supported, please "
1817 "use .inserted_primary_key_rows."
1818 )
1819
1820 ikp = self.inserted_primary_key_rows
1821 if ikp:
1822 return ikp[0]
1823 else:
1824 return None
1825
1826 def last_updated_params(self):
1827 """Return the collection of updated parameters from this
1828 execution.
1829
1830 Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
1831 statement is not a compiled expression construct
1832 or is not an update() construct.
1833
1834 """
1835 if not self.context.compiled:
1836 raise exc.InvalidRequestError(
1837 "Statement is not a compiled expression construct."
1838 )
1839 elif not self.context.isupdate:
1840 raise exc.InvalidRequestError(
1841 "Statement is not an update() expression construct."
1842 )
1843 elif self.context.executemany:
1844 return self.context.compiled_parameters
1845 else:
1846 return self.context.compiled_parameters[0]
1847
1848 def last_inserted_params(self):
1849 """Return the collection of inserted parameters from this
1850 execution.
1851
1852 Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
1853 statement is not a compiled expression construct
1854 or is not an insert() construct.
1855
1856 """
1857 if not self.context.compiled:
1858 raise exc.InvalidRequestError(
1859 "Statement is not a compiled expression construct."
1860 )
1861 elif not self.context.isinsert:
1862 raise exc.InvalidRequestError(
1863 "Statement is not an insert() expression construct."
1864 )
1865 elif self.context.executemany:
1866 return self.context.compiled_parameters
1867 else:
1868 return self.context.compiled_parameters[0]
1869
1870 @property
1871 def returned_defaults_rows(self):
1872 """Return a list of rows each containing the values of default
1873 columns that were fetched using
1874 the :meth:`.ValuesBase.return_defaults` feature.
1875
1876 The return value is a list of :class:`.Row` objects.
1877
1878 .. versionadded:: 1.4
1879
1880 """
1881 return self.context.returned_default_rows
1882
1883 def splice_horizontally(
1884 self, other: CursorResult[Any]
1885 ) -> CursorResult[Any]:
1886 """Return a new :class:`.CursorResult` that "horizontally splices"
1887 together the rows of this :class:`.CursorResult` with that of another
1888 :class:`.CursorResult`.
1889
1890 .. tip:: This method is for the benefit of the SQLAlchemy ORM and is
1891 not intended for general use.
1892
1893 "horizontally splices" means that for each row in the first and second
1894 result sets, a new row that concatenates the two rows together is
1895 produced, which then becomes the new row. The incoming
1896 :class:`.CursorResult` must have the identical number of rows. It is
1897 typically expected that the two result sets come from the same sort
1898 order as well, as the result rows are spliced together based on their
1899 position in the result.
1900
1901 The expected use case here is so that multiple INSERT..RETURNING
1902 statements (which definitely need to be sorted) against different
1903 tables can produce a single result that looks like a JOIN of those two
1904 tables.
1905
1906 E.g.::
1907
1908 r1 = connection.execute(
1909 users.insert().returning(
1910 users.c.user_name, users.c.user_id, sort_by_parameter_order=True
1911 ),
1912 user_values,
1913 )
1914
1915 r2 = connection.execute(
1916 addresses.insert().returning(
1917 addresses.c.address_id,
1918 addresses.c.address,
1919 addresses.c.user_id,
1920 sort_by_parameter_order=True,
1921 ),
1922 address_values,
1923 )
1924
1925 rows = r1.splice_horizontally(r2).all()
1926 assert rows == [
1927 ("john", 1, 1, "foo@bar.com", 1),
1928 ("jack", 2, 2, "bar@bat.com", 2),
1929 ]
1930
1931 .. versionadded:: 2.0
1932
1933 .. seealso::
1934
1935 :meth:`.CursorResult.splice_vertically`
1936
1937
1938 """ # noqa: E501
1939
1940 clone: CursorResult[Any] = self._generate()
1941 assert clone is self # just to note
1942 assert isinstance(other._metadata, CursorResultMetaData)
1943 assert isinstance(self._metadata, CursorResultMetaData)
1944 self_tf = self._metadata._tuplefilter
1945 other_tf = other._metadata._tuplefilter
1946 clone._metadata = self._metadata._splice_horizontally(other._metadata)
1947
1948 total_rows = [
1949 tuple(r1 if self_tf is None else self_tf(r1))
1950 + tuple(r2 if other_tf is None else other_tf(r2))
1951 for r1, r2 in zip(
1952 list(self._raw_row_iterator()),
1953 list(other._raw_row_iterator()),
1954 )
1955 ]
1956
1957 clone.cursor_strategy = FullyBufferedCursorFetchStrategy(
1958 None,
1959 initial_buffer=total_rows,
1960 )
1961 clone._reset_memoizations()
1962 return clone
1963
1964 def splice_vertically(self, other):
1965 """Return a new :class:`.CursorResult` that "vertically splices",
1966 i.e. "extends", the rows of this :class:`.CursorResult` with that of
1967 another :class:`.CursorResult`.
1968
1969 .. tip:: This method is for the benefit of the SQLAlchemy ORM and is
1970 not intended for general use.
1971
1972 "vertically splices" means the rows of the given result are appended to
1973 the rows of this cursor result. The incoming :class:`.CursorResult`
1974 must have rows that represent the identical list of columns in the
1975 identical order as they are in this :class:`.CursorResult`.
1976
1977 .. versionadded:: 2.0
1978
1979 .. seealso::
1980
1981 :meth:`.CursorResult.splice_horizontally`
1982
1983 """
1984 clone = self._generate()
1985 total_rows = list(self._raw_row_iterator()) + list(
1986 other._raw_row_iterator()
1987 )
1988
1989 clone.cursor_strategy = FullyBufferedCursorFetchStrategy(
1990 None,
1991 initial_buffer=total_rows,
1992 )
1993 clone._reset_memoizations()
1994 return clone
1995
1996 def _rewind(self, rows):
1997 """rewind this result back to the given rowset.
1998
1999 this is used internally for the case where an :class:`.Insert`
2000 construct combines the use of
2001 :meth:`.Insert.return_defaults` along with the
2002 "supplemental columns" feature.
2003
2004 NOTE: this method has not effect then an unique filter is applied
2005 to the result, meaning that no row will be returned.
2006
2007 """
2008
2009 if self._echo:
2010 self.context.connection._log_debug(
2011 "CursorResult rewound %d row(s)", len(rows)
2012 )
2013
2014 # the rows given are expected to be Row objects, so we
2015 # have to clear out processors which have already run on these
2016 # rows
2017 self._metadata = cast(
2018 CursorResultMetaData, self._metadata
2019 )._remove_processors_and_tuple_filter()
2020
2021 self.cursor_strategy = FullyBufferedCursorFetchStrategy(
2022 None,
2023 # TODO: if these are Row objects, can we save on not having to
2024 # re-make new Row objects out of them a second time? is that
2025 # what's actually happening right now? maybe look into this
2026 initial_buffer=rows,
2027 )
2028 self._reset_memoizations()
2029 return self
2030
2031 @property
2032 def returned_defaults(self):
2033 """Return the values of default columns that were fetched using
2034 the :meth:`.ValuesBase.return_defaults` feature.
2035
2036 The value is an instance of :class:`.Row`, or ``None``
2037 if :meth:`.ValuesBase.return_defaults` was not used or if the
2038 backend does not support RETURNING.
2039
2040 .. seealso::
2041
2042 :meth:`.ValuesBase.return_defaults`
2043
2044 """
2045
2046 if self.context.executemany:
2047 raise exc.InvalidRequestError(
2048 "This statement was an executemany call; if return defaults "
2049 "is supported, please use .returned_defaults_rows."
2050 )
2051
2052 rows = self.context.returned_default_rows
2053 if rows:
2054 return rows[0]
2055 else:
2056 return None
2057
2058 def lastrow_has_defaults(self):
2059 """Return ``lastrow_has_defaults()`` from the underlying
2060 :class:`.ExecutionContext`.
2061
2062 See :class:`.ExecutionContext` for details.
2063
2064 """
2065
2066 return self.context.lastrow_has_defaults()
2067
2068 def postfetch_cols(self):
2069 """Return ``postfetch_cols()`` from the underlying
2070 :class:`.ExecutionContext`.
2071
2072 See :class:`.ExecutionContext` for details.
2073
2074 Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
2075 statement is not a compiled expression construct
2076 or is not an insert() or update() construct.
2077
2078 """
2079
2080 if not self.context.compiled:
2081 raise exc.InvalidRequestError(
2082 "Statement is not a compiled expression construct."
2083 )
2084 elif not self.context.isinsert and not self.context.isupdate:
2085 raise exc.InvalidRequestError(
2086 "Statement is not an insert() or update() "
2087 "expression construct."
2088 )
2089 return self.context.postfetch_cols
2090
2091 def prefetch_cols(self):
2092 """Return ``prefetch_cols()`` from the underlying
2093 :class:`.ExecutionContext`.
2094
2095 See :class:`.ExecutionContext` for details.
2096
2097 Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
2098 statement is not a compiled expression construct
2099 or is not an insert() or update() construct.
2100
2101 """
2102
2103 if not self.context.compiled:
2104 raise exc.InvalidRequestError(
2105 "Statement is not a compiled expression construct."
2106 )
2107 elif not self.context.isinsert and not self.context.isupdate:
2108 raise exc.InvalidRequestError(
2109 "Statement is not an insert() or update() "
2110 "expression construct."
2111 )
2112 return self.context.prefetch_cols
2113
2114 def supports_sane_rowcount(self):
2115 """Return ``supports_sane_rowcount`` from the dialect.
2116
2117 See :attr:`_engine.CursorResult.rowcount` for background.
2118
2119 """
2120
2121 return self.dialect.supports_sane_rowcount
2122
2123 def supports_sane_multi_rowcount(self):
2124 """Return ``supports_sane_multi_rowcount`` from the dialect.
2125
2126 See :attr:`_engine.CursorResult.rowcount` for background.
2127
2128 """
2129
2130 return self.dialect.supports_sane_multi_rowcount
2131
2132 @util.memoized_property
2133 def rowcount(self) -> int:
2134 """Return the 'rowcount' for this result.
2135
2136 The primary purpose of 'rowcount' is to report the number of rows
2137 matched by the WHERE criterion of an UPDATE or DELETE statement
2138 executed once (i.e. for a single parameter set), which may then be
2139 compared to the number of rows expected to be updated or deleted as a
2140 means of asserting data integrity.
2141
2142 This attribute is transferred from the ``cursor.rowcount`` attribute
2143 of the DBAPI before the cursor is closed, to support DBAPIs that
2144 don't make this value available after cursor close. Some DBAPIs may
2145 offer meaningful values for other kinds of statements, such as INSERT
2146 and SELECT statements as well. In order to retrieve ``cursor.rowcount``
2147 for these statements, set the
2148 :paramref:`.Connection.execution_options.preserve_rowcount`
2149 execution option to True, which will cause the ``cursor.rowcount``
2150 value to be unconditionally memoized before any results are returned
2151 or the cursor is closed, regardless of statement type.
2152
2153 For cases where the DBAPI does not support rowcount for a particular
2154 kind of statement and/or execution, the returned value will be ``-1``,
2155 which is delivered directly from the DBAPI and is part of :pep:`249`.
2156 All DBAPIs should support rowcount for single-parameter-set
2157 UPDATE and DELETE statements, however.
2158
2159 .. note::
2160
2161 Notes regarding :attr:`_engine.CursorResult.rowcount`:
2162
2163
2164 * This attribute returns the number of rows *matched*,
2165 which is not necessarily the same as the number of rows
2166 that were actually *modified*. For example, an UPDATE statement
2167 may have no net change on a given row if the SET values
2168 given are the same as those present in the row already.
2169 Such a row would be matched but not modified.
2170 On backends that feature both styles, such as MySQL,
2171 rowcount is configured to return the match
2172 count in all cases.
2173
2174 * :attr:`_engine.CursorResult.rowcount` in the default case is
2175 *only* useful in conjunction with an UPDATE or DELETE statement,
2176 and only with a single set of parameters. For other kinds of
2177 statements, SQLAlchemy will not attempt to pre-memoize the value
2178 unless the
2179 :paramref:`.Connection.execution_options.preserve_rowcount`
2180 execution option is used. Note that contrary to :pep:`249`, many
2181 DBAPIs do not support rowcount values for statements that are not
2182 UPDATE or DELETE, particularly when rows are being returned which
2183 are not fully pre-buffered. DBAPIs that dont support rowcount
2184 for a particular kind of statement should return the value ``-1``
2185 for such statements.
2186
2187 * :attr:`_engine.CursorResult.rowcount` may not be meaningful
2188 when executing a single statement with multiple parameter sets
2189 (i.e. an :term:`executemany`). Most DBAPIs do not sum "rowcount"
2190 values across multiple parameter sets and will return ``-1``
2191 when accessed.
2192
2193 * SQLAlchemy's :ref:`engine_insertmanyvalues` feature does support
2194 a correct population of :attr:`_engine.CursorResult.rowcount`
2195 when the :paramref:`.Connection.execution_options.preserve_rowcount`
2196 execution option is set to True.
2197
2198 * Statements that use RETURNING may not support rowcount, returning
2199 a ``-1`` value instead.
2200
2201 .. seealso::
2202
2203 :ref:`tutorial_update_delete_rowcount` - in the :ref:`unified_tutorial`
2204
2205 :paramref:`.Connection.execution_options.preserve_rowcount`
2206
2207 """ # noqa: E501
2208 try:
2209 return self.context.rowcount
2210 except BaseException as e:
2211 self.cursor_strategy.handle_exception(self, self.cursor, e)
2212 raise # not called
2213
2214 @property
2215 def lastrowid(self):
2216 """Return the 'lastrowid' accessor on the DBAPI cursor.
2217
2218 This is a DBAPI specific method and is only functional
2219 for those backends which support it, for statements
2220 where it is appropriate. It's behavior is not
2221 consistent across backends.
2222
2223 Usage of this method is normally unnecessary when
2224 using insert() expression constructs; the
2225 :attr:`~CursorResult.inserted_primary_key` attribute provides a
2226 tuple of primary key values for a newly inserted row,
2227 regardless of database backend.
2228
2229 """
2230 try:
2231 return self.context.get_lastrowid()
2232 except BaseException as e:
2233 self.cursor_strategy.handle_exception(self, self.cursor, e)
2234
2235 @property
2236 def returns_rows(self):
2237 """True if this :class:`_engine.CursorResult` returns zero or more
2238 rows.
2239
2240 I.e. if it is legal to call the methods
2241 :meth:`_engine.CursorResult.fetchone`,
2242 :meth:`_engine.CursorResult.fetchmany`
2243 :meth:`_engine.CursorResult.fetchall`.
2244
2245 Overall, the value of :attr:`_engine.CursorResult.returns_rows` should
2246 always be synonymous with whether or not the DBAPI cursor had a
2247 ``.description`` attribute, indicating the presence of result columns,
2248 noting that a cursor that returns zero rows still has a
2249 ``.description`` if a row-returning statement was emitted.
2250
2251 This attribute should be True for all results that are against
2252 SELECT statements, as well as for DML statements INSERT/UPDATE/DELETE
2253 that use RETURNING. For INSERT/UPDATE/DELETE statements that were
2254 not using RETURNING, the value will usually be False, however
2255 there are some dialect-specific exceptions to this, such as when
2256 using the MSSQL / pyodbc dialect a SELECT is emitted inline in
2257 order to retrieve an inserted primary key value.
2258
2259
2260 """
2261 return self._metadata.returns_rows
2262
2263 @property
2264 def is_insert(self):
2265 """True if this :class:`_engine.CursorResult` is the result
2266 of a executing an expression language compiled
2267 :func:`_expression.insert` construct.
2268
2269 When True, this implies that the
2270 :attr:`inserted_primary_key` attribute is accessible,
2271 assuming the statement did not include
2272 a user defined "returning" construct.
2273
2274 """
2275 return self.context.isinsert
2276
2277 def _fetchiter_impl(self):
2278 fetchone = self.cursor_strategy.fetchone
2279
2280 while True:
2281 row = fetchone(self, self.cursor)
2282 if row is None:
2283 break
2284 yield row
2285
2286 def _fetchone_impl(self, hard_close=False):
2287 return self.cursor_strategy.fetchone(self, self.cursor, hard_close)
2288
2289 def _fetchall_impl(self):
2290 return self.cursor_strategy.fetchall(self, self.cursor)
2291
2292 def _fetchmany_impl(self, size=None):
2293 return self.cursor_strategy.fetchmany(self, self.cursor, size)
2294
2295 def _raw_row_iterator(self):
2296 return self._fetchiter_impl()
2297
2298 def merge(
2299 self, *others: Result[Unpack[TupleAny]]
2300 ) -> MergedResult[Unpack[TupleAny]]:
2301 merged_result = super().merge(*others)
2302 if self.context._has_rowcount:
2303 merged_result.rowcount = sum(
2304 cast("CursorResult[Any]", result).rowcount
2305 for result in (self,) + others
2306 )
2307 return merged_result
2308
2309 def close(self) -> Any:
2310 """Close this :class:`_engine.CursorResult`.
2311
2312 This closes out the underlying DBAPI cursor corresponding to the
2313 statement execution, if one is still present. Note that the DBAPI
2314 cursor is automatically released when the :class:`_engine.CursorResult`
2315 exhausts all available rows. :meth:`_engine.CursorResult.close` is
2316 generally an optional method except in the case when discarding a
2317 :class:`_engine.CursorResult` that still has additional rows pending
2318 for fetch.
2319
2320 After this method is called, it is no longer valid to call upon
2321 the fetch methods, which will raise a :class:`.ResourceClosedError`
2322 on subsequent use.
2323
2324 .. seealso::
2325
2326 :ref:`connections_toplevel`
2327
2328 """
2329 self._soft_close(hard=True)
2330
2331 @_generative
2332 def yield_per(self, num: int) -> Self:
2333 self._yield_per = num
2334 self.cursor_strategy.yield_per(self, self.cursor, num)
2335 return self
2336
2337
2338ResultProxy = CursorResult