Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/cursor.py: 27% (684 statements), coverage.py v7.0.1, created at 2022-12-25 06:11 +0000
# engine/cursor.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php

"""Define cursor-specific result set constructs including
:class:`.BaseCursorResult`, :class:`.CursorResult`."""


import collections
import functools

from .result import Result
from .result import ResultMetaData
from .result import SimpleResultMetaData
from .result import tuplegetter
from .row import LegacyRow
from .. import exc
from .. import util
from ..sql import expression
from ..sql import sqltypes
from ..sql import util as sql_util
from ..sql.base import _generative
from ..sql.compiler import RM_NAME
from ..sql.compiler import RM_OBJECTS
from ..sql.compiler import RM_RENDERED_NAME
from ..sql.compiler import RM_TYPE

_UNPICKLED = util.symbol("unpickled")


# metadata entry tuple indexes.
# using raw tuple is faster than namedtuple.
MD_INDEX = 0  # integer index in cursor.description
MD_RESULT_MAP_INDEX = 1  # integer index in compiled._result_columns
MD_OBJECTS = 2  # other string keys and ColumnElement obj that can match
MD_LOOKUP_KEY = 3  # string key we usually expect for key-based lookup
MD_RENDERED_NAME = 4  # name that is usually in cursor.description
MD_PROCESSOR = 5  # callable to process a result value into a row
MD_UNTRANSLATED = 6  # raw name from cursor.description
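
# Illustrative sketch (an editorial note, not part of the original source):
# a metadata entry for a column ``users.id`` at cursor position 0 might look
# roughly like the following tuple, indexed by the MD_* constants above:
#
#     (0, 0, (Column("id", ...), "id"), "id", "id", <processor or None>, None)
#
# a raw tuple keeps per-lookup overhead lower than a namedtuple would.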


class CursorResultMetaData(ResultMetaData):
    """Result metadata for DBAPI cursors."""

    __slots__ = (
        "_keymap",
        "case_sensitive",
        "_processors",
        "_keys",
        "_keymap_by_result_column_idx",
        "_tuplefilter",
        "_translated_indexes",
        "_safe_for_cache"
        # don't need _unique_filters support here for now.  Can be added
        # if a need arises.
    )

    returns_rows = True

    def _has_key(self, key):
        return key in self._keymap

    def _for_freeze(self):
        return SimpleResultMetaData(
            self._keys,
            extra=[self._keymap[key][MD_OBJECTS] for key in self._keys],
        )

    def _reduce(self, keys):
        recs = list(self._metadata_for_keys(keys))

        indexes = [rec[MD_INDEX] for rec in recs]
        new_keys = [rec[MD_LOOKUP_KEY] for rec in recs]

        if self._translated_indexes:
            indexes = [self._translated_indexes[idx] for idx in indexes]

        tup = tuplegetter(*indexes)

        new_metadata = self.__class__.__new__(self.__class__)
        new_metadata.case_sensitive = self.case_sensitive
        new_metadata._processors = self._processors
        new_metadata._keys = new_keys
        new_metadata._tuplefilter = tup
        new_metadata._translated_indexes = indexes

        new_recs = [
            (index,) + rec[1:]
            for index, rec in enumerate(self._metadata_for_keys(keys))
        ]
        new_metadata._keymap = {rec[MD_LOOKUP_KEY]: rec for rec in new_recs}

        # TODO: need unit test for:
        # result = connection.execute("raw sql, no columns").scalars()
        # without the "or ()" it's failing because MD_OBJECTS is None
        new_metadata._keymap.update(
            {
                e: new_rec
                for new_rec in new_recs
                for e in new_rec[MD_OBJECTS] or ()
            }
        )

        return new_metadata
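
    # Editorial note (not in the original source): _reduce() is what backs
    # key-subsetting operations on the public Result API; e.g. a call along
    # the lines of result.columns("a", "b") derives a new metadata object
    # whose keymap and tuple filter cover only the requested keys.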

    def _adapt_to_context(self, context):
        """When using a cached Compiled construct that has a _result_map,
        for a new statement that used the cached Compiled, we need to ensure
        the keymap has the Column objects from our new statement as keys.
        So here we rewrite keymap with new entries for the new columns
        as matched to those of the cached statement.

        """

        if not context.compiled._result_columns:
            return self

        compiled_statement = context.compiled.statement
        invoked_statement = context.invoked_statement

        if compiled_statement is invoked_statement:
            return self

        # make a copy and add the columns from the invoked statement
        # to the result map.
        md = self.__class__.__new__(self.__class__)

        md._keymap = dict(self._keymap)

        keymap_by_position = self._keymap_by_result_column_idx

        for idx, new in enumerate(invoked_statement._all_selected_columns):
            try:
                rec = keymap_by_position[idx]
            except KeyError:
                # this can happen when there are bogus column entries
                # in a TextualSelect
                pass
            else:
                md._keymap[new] = rec

        md.case_sensitive = self.case_sensitive
        md._processors = self._processors
        assert not self._tuplefilter
        md._tuplefilter = None
        md._translated_indexes = None
        md._keys = self._keys
        md._keymap_by_result_column_idx = self._keymap_by_result_column_idx
        md._safe_for_cache = self._safe_for_cache
        return md

    def __init__(self, parent, cursor_description):
        context = parent.context
        dialect = context.dialect
        self._tuplefilter = None
        self._translated_indexes = None
        self.case_sensitive = dialect.case_sensitive
        self._safe_for_cache = False

        if context.result_column_struct:
            (
                result_columns,
                cols_are_ordered,
                textual_ordered,
                ad_hoc_textual,
                loose_column_name_matching,
            ) = context.result_column_struct
            num_ctx_cols = len(result_columns)
        else:
            result_columns = (
                cols_are_ordered
            ) = (
                num_ctx_cols
            ) = (
                ad_hoc_textual
            ) = loose_column_name_matching = textual_ordered = False

        # merge cursor.description with the column info
        # present in the compiled structure, if any
        raw = self._merge_cursor_description(
            context,
            cursor_description,
            result_columns,
            num_ctx_cols,
            cols_are_ordered,
            textual_ordered,
            ad_hoc_textual,
            loose_column_name_matching,
        )

        self._keymap = {}

        # processors in key order for certain per-row
        # views like __iter__ and slices
        self._processors = [
            metadata_entry[MD_PROCESSOR] for metadata_entry in raw
        ]

        if context.compiled:
            self._keymap_by_result_column_idx = {
                metadata_entry[MD_RESULT_MAP_INDEX]: metadata_entry
                for metadata_entry in raw
            }

        # keymap by primary string...
        by_key = dict(
            [
                (metadata_entry[MD_LOOKUP_KEY], metadata_entry)
                for metadata_entry in raw
            ]
        )

        # for compiled SQL constructs, copy additional lookup keys into
        # the key lookup map, such as Column objects, labels,
        # column keys and other names
        if num_ctx_cols:

            if len(by_key) != num_ctx_cols:
                # if by-primary-string dictionary smaller than
                # number of columns, assume we have dupes; (this check
                # is also in place if string dictionary is bigger, as
                # can occur when '*' was used as one of the compiled columns,
                # which may or may not be suggestive of dupes), rewrite
                # dupe records with "None" for index which results in
                # ambiguous column exception when accessed.
                #
                # this is considered to be the less common case as it is not
                # common to have dupe column keys in a SELECT statement.
                #
                # new in 1.4: get the complete set of all possible keys,
                # strings, objects, whatever, that are dupes across two
                # different records, first.
                index_by_key = {}
                dupes = set()
                for metadata_entry in raw:
                    for key in (metadata_entry[MD_RENDERED_NAME],) + (
                        metadata_entry[MD_OBJECTS] or ()
                    ):
                        if not self.case_sensitive and isinstance(
                            key, util.string_types
                        ):
                            key = key.lower()
                        idx = metadata_entry[MD_INDEX]
                        # if this key has been associated with more than one
                        # positional index, it's a dupe
                        if index_by_key.setdefault(key, idx) != idx:
                            dupes.add(key)

                # then put everything we have into the keymap excluding only
                # those keys that are dupes.
                self._keymap.update(
                    [
                        (obj_elem, metadata_entry)
                        for metadata_entry in raw
                        if metadata_entry[MD_OBJECTS]
                        for obj_elem in metadata_entry[MD_OBJECTS]
                        if obj_elem not in dupes
                    ]
                )

                # then for the dupe keys, put the "ambiguous column"
                # record into by_key.
                by_key.update({key: (None, None, (), key) for key in dupes})

            else:
                # no dupes - copy secondary elements from compiled
                # columns into self._keymap
                self._keymap.update(
                    [
                        (obj_elem, metadata_entry)
                        for metadata_entry in raw
                        if metadata_entry[MD_OBJECTS]
                        for obj_elem in metadata_entry[MD_OBJECTS]
                    ]
                )

        # update keymap with primary string names taking
        # precedence
        self._keymap.update(by_key)

        # update keymap with "translated" names (sqlite-only thing)
        if not num_ctx_cols and context._translate_colname:
            self._keymap.update(
                [
                    (
                        metadata_entry[MD_UNTRANSLATED],
                        self._keymap[metadata_entry[MD_LOOKUP_KEY]],
                    )
                    for metadata_entry in raw
                    if metadata_entry[MD_UNTRANSLATED]
                ]
            )

    def _merge_cursor_description(
        self,
        context,
        cursor_description,
        result_columns,
        num_ctx_cols,
        cols_are_ordered,
        textual_ordered,
        ad_hoc_textual,
        loose_column_name_matching,
    ):
        """Merge a cursor.description with compiled result column information.

        There are at least four separate strategies used here, selected
        depending on the type of SQL construct used to start with.

        The most common case is that of the compiled SQL expression construct,
        which generated the column names present in the raw SQL string and
        which has the identical number of columns as were reported by
        cursor.description.  In this case, we assume a 1-1 positional mapping
        between the entries in cursor.description and the compiled object.
        This is also the most performant case as we disregard extracting /
        decoding the column names present in cursor.description since we
        already have the desired name we generated in the compiled SQL
        construct.

        The next common case is that of the completely raw string SQL,
        such as passed to connection.execute().  In this case we have no
        compiled construct to work with, so we extract and decode the
        names from cursor.description and index those as the primary
        result row target keys.

        The remaining fairly common case is that of the textual SQL
        that includes at least partial column information; this is when
        we use a :class:`_expression.TextualSelect` construct.
        This construct may have
        unordered or ordered column information.  In the ordered case, we
        merge the cursor.description and the compiled construct's information
        positionally, and warn if there are additional description names
        present, however we still decode the names in cursor.description
        as we don't have a guarantee that the names in the columns match
        on these.  In the unordered case, we match names in cursor.description
        to that of the compiled construct based on name matching.
        In both of these cases, the cursor.description names and the column
        expression objects and names are indexed as result row target keys.

        The final case is much less common, where we have a compiled
        non-textual SQL expression construct, but the number of columns
        in cursor.description doesn't match what's in the compiled
        construct.  We make the guess here that there might be textual
        column expressions in the compiled construct that themselves include
        a comma in them causing them to split.  We do the same name-matching
        as with textual non-ordered columns.

        The name-matched system of merging is the same as that used by
        SQLAlchemy for all cases up through the 0.9 series.  Positional
        matching for compiled SQL expressions was introduced in 1.0 as a
        major performance feature, and positional matching for textual
        :class:`_expression.TextualSelect` objects in 1.1.
        As name matching is no longer
        a common case, it was acceptable to factor it into smaller generator-
        oriented methods that are easier to understand, but incur slightly
        more performance overhead.

        """

        case_sensitive = context.dialect.case_sensitive

        if (
            num_ctx_cols
            and cols_are_ordered
            and not textual_ordered
            and num_ctx_cols == len(cursor_description)
        ):
            self._keys = [elem[0] for elem in result_columns]
            # pure positional 1-1 case; doesn't need to read
            # the names from cursor.description

            # this metadata is safe to cache because we are guaranteed
            # to have the columns in the same order for new executions
            self._safe_for_cache = True
            return [
                (
                    idx,
                    idx,
                    rmap_entry[RM_OBJECTS],
                    rmap_entry[RM_NAME].lower()
                    if not case_sensitive
                    else rmap_entry[RM_NAME],
                    rmap_entry[RM_RENDERED_NAME],
                    context.get_result_processor(
                        rmap_entry[RM_TYPE],
                        rmap_entry[RM_RENDERED_NAME],
                        cursor_description[idx][1],
                    ),
                    None,
                )
                for idx, rmap_entry in enumerate(result_columns)
            ]
        else:
            # name-based or text-positional cases, where we need
            # to read cursor.description names

            if textual_ordered or (
                ad_hoc_textual and len(cursor_description) == num_ctx_cols
            ):
                self._safe_for_cache = True
                # textual positional case
                raw_iterator = self._merge_textual_cols_by_position(
                    context, cursor_description, result_columns
                )
            elif num_ctx_cols:
                # compiled SQL with a mismatch of description cols
                # vs. compiled cols, or textual w/ unordered columns
                # the order of columns can change if the query is
                # against a "select *", so not safe to cache
                self._safe_for_cache = False
                raw_iterator = self._merge_cols_by_name(
                    context,
                    cursor_description,
                    result_columns,
                    loose_column_name_matching,
                )
            else:
                # no compiled SQL, just a raw string, order of columns
                # can change for "select *"
                self._safe_for_cache = False
                raw_iterator = self._merge_cols_by_none(
                    context, cursor_description
                )

            return [
                (
                    idx,
                    ridx,
                    obj,
                    cursor_colname,
                    cursor_colname,
                    context.get_result_processor(
                        mapped_type, cursor_colname, coltype
                    ),
                    untranslated,
                )
                for (
                    idx,
                    ridx,
                    cursor_colname,
                    mapped_type,
                    coltype,
                    obj,
                    untranslated,
                ) in raw_iterator
            ]

    def _colnames_from_description(self, context, cursor_description):
        """Extract column names and data types from a cursor.description.

        Applies unicode decoding, column translation, "normalization",
        and case sensitivity rules to the names based on the dialect.

        """

        dialect = context.dialect
        case_sensitive = dialect.case_sensitive
        translate_colname = context._translate_colname
        description_decoder = (
            dialect._description_decoder
            if dialect.description_encoding
            else None
        )
        normalize_name = (
            dialect.normalize_name if dialect.requires_name_normalize else None
        )
        untranslated = None

        self._keys = []

        for idx, rec in enumerate(cursor_description):
            colname = rec[0]
            coltype = rec[1]

            if description_decoder:
                colname = description_decoder(colname)

            if translate_colname:
                colname, untranslated = translate_colname(colname)

            if normalize_name:
                colname = normalize_name(colname)

            self._keys.append(colname)
            if not case_sensitive:
                colname = colname.lower()

            yield idx, colname, untranslated, coltype
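
    # For reference (an editorial note, not part of the original source):
    # each DBAPI cursor.description entry is a 7-item sequence per PEP 249,
    # (name, type_code, display_size, internal_size, precision, scale,
    # null_ok); only name (rec[0]) and type_code (rec[1]) are consumed above.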

    def _merge_textual_cols_by_position(
        self, context, cursor_description, result_columns
    ):
        num_ctx_cols = len(result_columns) if result_columns else None

        if num_ctx_cols > len(cursor_description):
            util.warn(
                "Number of columns in textual SQL (%d) is "
                "smaller than number of columns requested (%d)"
                % (num_ctx_cols, len(cursor_description))
            )
        seen = set()
        for (
            idx,
            colname,
            untranslated,
            coltype,
        ) in self._colnames_from_description(context, cursor_description):
            if idx < num_ctx_cols:
                ctx_rec = result_columns[idx]
                obj = ctx_rec[RM_OBJECTS]
                ridx = idx
                mapped_type = ctx_rec[RM_TYPE]
                if obj[0] in seen:
                    raise exc.InvalidRequestError(
                        "Duplicate column expression requested "
                        "in textual SQL: %r" % obj[0]
                    )
                seen.add(obj[0])
            else:
                mapped_type = sqltypes.NULLTYPE
                obj = None
                ridx = None
            yield idx, ridx, colname, mapped_type, coltype, obj, untranslated

    def _merge_cols_by_name(
        self,
        context,
        cursor_description,
        result_columns,
        loose_column_name_matching,
    ):
        dialect = context.dialect
        case_sensitive = dialect.case_sensitive
        match_map = self._create_description_match_map(
            result_columns, case_sensitive, loose_column_name_matching
        )
        for (
            idx,
            colname,
            untranslated,
            coltype,
        ) in self._colnames_from_description(context, cursor_description):
            try:
                ctx_rec = match_map[colname]
            except KeyError:
                mapped_type = sqltypes.NULLTYPE
                obj = None
                result_columns_idx = None
            else:
                obj = ctx_rec[1]
                mapped_type = ctx_rec[2]
                result_columns_idx = ctx_rec[3]
            yield (
                idx,
                result_columns_idx,
                colname,
                mapped_type,
                coltype,
                obj,
                untranslated,
            )

    @classmethod
    def _create_description_match_map(
        cls,
        result_columns,
        case_sensitive=True,
        loose_column_name_matching=False,
    ):
        """when matching cursor.description to a set of names that are present
        in a Compiled object, as is the case with TextualSelect, get all the
        names we expect might match those in cursor.description.
        """

        d = {}
        for ridx, elem in enumerate(result_columns):
            key = elem[RM_RENDERED_NAME]

            if not case_sensitive:
                key = key.lower()
            if key in d:
                # conflicting keyname - just add the column-linked objects
                # to the existing record.  if there is a duplicate column
                # name in the cursor description, this will allow all of those
                # objects to raise an ambiguous column error
                e_name, e_obj, e_type, e_ridx = d[key]
                d[key] = e_name, e_obj + elem[RM_OBJECTS], e_type, ridx
            else:
                d[key] = (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE], ridx)

            if loose_column_name_matching:
                # when using a textual statement with an unordered set
                # of columns that line up, we are expecting the user
                # to be using label names in the SQL that match to the column
                # expressions.  Enable more liberal matching for this case;
                # duplicate keys that are ambiguous will be fixed later.
                for r_key in elem[RM_OBJECTS]:
                    d.setdefault(
                        r_key,
                        (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE], ridx),
                    )

        return d
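
    # Illustrative sketch (not in the original source): for a statement like
    # text("SELECT a, b FROM t").columns(t.c.a, t.c.b), the returned dict
    # maps each rendered name to a (name, objects, type, result-column index)
    # tuple, roughly {"a": ("a", (t.c.a, ...), t.c.a.type, 0), ...}, so that
    # cursor.description names can be matched back to compiled columns.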

    def _merge_cols_by_none(self, context, cursor_description):
        for (
            idx,
            colname,
            untranslated,
            coltype,
        ) in self._colnames_from_description(context, cursor_description):
            yield (
                idx,
                None,
                colname,
                sqltypes.NULLTYPE,
                coltype,
                None,
                untranslated,
            )

    def _key_fallback(self, key, err, raiseerr=True):
        if raiseerr:
            util.raise_(
                exc.NoSuchColumnError(
                    "Could not locate column in row for column '%s'"
                    % util.string_or_unprintable(key)
                ),
                replace_context=err,
            )
        else:
            return None

    def _raise_for_ambiguous_column_name(self, rec):
        raise exc.InvalidRequestError(
            "Ambiguous column name '%s' in "
            "result set column descriptions" % rec[MD_LOOKUP_KEY]
        )

    def _index_for_key(self, key, raiseerr=True):
        # TODO: can consider pre-loading ints and negative ints
        # into _keymap - also no coverage here
        if isinstance(key, int):
            key = self._keys[key]

        try:
            rec = self._keymap[key]
        except KeyError as ke:
            rec = self._key_fallback(key, ke, raiseerr)
            if rec is None:
                return None

        index = rec[0]

        if index is None:
            self._raise_for_ambiguous_column_name(rec)
        return index

    def _indexes_for_keys(self, keys):

        try:
            return [self._keymap[key][0] for key in keys]
        except KeyError as ke:
            # ensure it raises
            CursorResultMetaData._key_fallback(self, ke.args[0], ke)

    def _metadata_for_keys(self, keys):
        for key in keys:
            if int in key.__class__.__mro__:
                key = self._keys[key]

            try:
                rec = self._keymap[key]
            except KeyError as ke:
                # ensure it raises
                CursorResultMetaData._key_fallback(self, ke.args[0], ke)

            index = rec[0]

            if index is None:
                self._raise_for_ambiguous_column_name(rec)

            yield rec

    def __getstate__(self):
        return {
            "_keymap": {
                key: (rec[MD_INDEX], rec[MD_RESULT_MAP_INDEX], _UNPICKLED, key)
                for key, rec in self._keymap.items()
                if isinstance(key, util.string_types + util.int_types)
            },
            "_keys": self._keys,
            "case_sensitive": self.case_sensitive,
            "_translated_indexes": self._translated_indexes,
            "_tuplefilter": self._tuplefilter,
        }

    def __setstate__(self, state):
        self._processors = [None for _ in range(len(state["_keys"]))]
        self._keymap = state["_keymap"]

        self._keymap_by_result_column_idx = {
            rec[MD_RESULT_MAP_INDEX]: rec for rec in self._keymap.values()
        }
        self._keys = state["_keys"]
        self.case_sensitive = state["case_sensitive"]

        if state["_translated_indexes"]:
            self._translated_indexes = state["_translated_indexes"]
            self._tuplefilter = tuplegetter(*self._translated_indexes)
        else:
            self._translated_indexes = self._tuplefilter = None


class LegacyCursorResultMetaData(CursorResultMetaData):
    __slots__ = ()

    def _contains(self, value, row):
        key = value
        if key in self._keymap:
            util.warn_deprecated_20(
                "Using the 'in' operator to test for string or column "
                "keys, or integer indexes, in a :class:`.Row` object is "
                "deprecated and will "
                "be removed in a future release. "
                "Use the `Row._fields` or `Row._mapping` attribute, i.e. "
                "'key in row._fields'",
            )
            return True
        else:
            return self._key_fallback(key, None, False) is not None

    def _key_fallback(self, key, err, raiseerr=True):
        map_ = self._keymap
        result = None

        if isinstance(key, util.string_types):
            result = map_.get(key if self.case_sensitive else key.lower())
        elif isinstance(key, expression.ColumnElement):
            if (
                key._tq_label
                and (
                    key._tq_label
                    if self.case_sensitive
                    else key._tq_label.lower()
                )
                in map_
            ):
                result = map_[
                    key._tq_label
                    if self.case_sensitive
                    else key._tq_label.lower()
                ]
            elif (
                hasattr(key, "name")
                and (key.name if self.case_sensitive else key.name.lower())
                in map_
            ):
                # match is only on name.
                result = map_[
                    key.name if self.case_sensitive else key.name.lower()
                ]

            # search extra hard to make sure this
            # isn't a column/label name overlap.
            # this check isn't currently available if the row
            # was unpickled.
            if result is not None and result[MD_OBJECTS] not in (
                None,
                _UNPICKLED,
            ):
                for obj in result[MD_OBJECTS]:
                    if key._compare_name_for_result(obj):
                        break
                else:
                    result = None
            if result is not None:
                if result[MD_OBJECTS] is _UNPICKLED:
                    util.warn_deprecated(
                        "Retrieving row values using Column objects from a "
                        "row that was unpickled is deprecated; adequate "
                        "state cannot be pickled for this to be efficient. "
                        "This usage will raise KeyError in a future release.",
                        version="1.4",
                    )
                else:
                    util.warn_deprecated(
                        "Retrieving row values using Column objects with only "
                        "matching names as keys is deprecated, and will raise "
                        "KeyError in a future release; only Column "
                        "objects that are explicitly part of the statement "
                        "object should be used.",
                        version="1.4",
                    )
        if result is None:
            if raiseerr:
                util.raise_(
                    exc.NoSuchColumnError(
                        "Could not locate column in row for column '%s'"
                        % util.string_or_unprintable(key)
                    ),
                    replace_context=err,
                )
            else:
                return None
        else:
            map_[key] = result
        return result

    def _warn_for_nonint(self, key):
        util.warn_deprecated_20(
            "Using non-integer/slice indices on Row is deprecated and will "
            "be removed in version 2.0; please use row._mapping[<key>], or "
            "the mappings() accessor on the Result object.",
            stacklevel=4,
        )

    def _has_key(self, key):
        if key in self._keymap:
            return True
        else:
            return self._key_fallback(key, None, False) is not None


class ResultFetchStrategy(object):
    """Define a fetching strategy for a result object.

    .. versionadded:: 1.4

    """

    __slots__ = ()

    alternate_cursor_description = None

    def soft_close(self, result, dbapi_cursor):
        raise NotImplementedError()

    def hard_close(self, result, dbapi_cursor):
        raise NotImplementedError()

    def yield_per(self, result, dbapi_cursor, num):
        return

    def fetchone(self, result, dbapi_cursor, hard_close=False):
        raise NotImplementedError()

    def fetchmany(self, result, dbapi_cursor, size=None):
        raise NotImplementedError()

    def fetchall(self, result, dbapi_cursor):
        raise NotImplementedError()

    def handle_exception(self, result, dbapi_cursor, err):
        raise err
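
# Editorial note (not in the original source): the concrete strategies below
# implement this interface.  CursorFetchStrategy reads directly from the
# DBAPI cursor; BufferedRowCursorFetchStrategy and
# FullyBufferedCursorFetchStrategy add row buffering on top of it; and the
# NoCursor* variants stand in once the cursor has been released.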


class NoCursorFetchStrategy(ResultFetchStrategy):
    """Cursor strategy for a result that has no open cursor.

    There are two varieties of this strategy, one for DQL and one for
    DML (and also DDL), each of which represents a result that had a cursor
    but no longer has one.

    """

    __slots__ = ()

    def soft_close(self, result, dbapi_cursor):
        pass

    def hard_close(self, result, dbapi_cursor):
        pass

    def fetchone(self, result, dbapi_cursor, hard_close=False):
        return self._non_result(result, None)

    def fetchmany(self, result, dbapi_cursor, size=None):
        return self._non_result(result, [])

    def fetchall(self, result, dbapi_cursor):
        return self._non_result(result, [])

    def _non_result(self, result, default, err=None):
        raise NotImplementedError()


class NoCursorDQLFetchStrategy(NoCursorFetchStrategy):
    """Cursor strategy for a DQL result that has no open cursor.

    This is a result set that can return rows, i.e. for a SELECT, or for an
    INSERT, UPDATE, DELETE that includes RETURNING.  However it is in the state
    where the cursor is closed and no rows remain available.  The owning result
    object may or may not be "hard closed", which determines if the fetch
    methods send empty results or raise for closed result.

    """

    __slots__ = ()

    def _non_result(self, result, default, err=None):
        if result.closed:
            util.raise_(
                exc.ResourceClosedError("This result object is closed."),
                replace_context=err,
            )
        else:
            return default


_NO_CURSOR_DQL = NoCursorDQLFetchStrategy()


class NoCursorDMLFetchStrategy(NoCursorFetchStrategy):
    """Cursor strategy for a DML result that has no open cursor.

    This is a result set that does not return rows, i.e. for an INSERT,
    UPDATE, DELETE that does not include RETURNING.

    """

    __slots__ = ()

    def _non_result(self, result, default, err=None):
        # we only expect to have a _NoResultMetaData() here right now.
        assert not result._metadata.returns_rows
        result._metadata._we_dont_return_rows(err)


_NO_CURSOR_DML = NoCursorDMLFetchStrategy()


class CursorFetchStrategy(ResultFetchStrategy):
    """Call fetch methods from a DBAPI cursor.

    Alternate versions of this class may instead buffer the rows from
    cursors or not use cursors at all.

    """

    __slots__ = ()

    def soft_close(self, result, dbapi_cursor):
        result.cursor_strategy = _NO_CURSOR_DQL

    def hard_close(self, result, dbapi_cursor):
        result.cursor_strategy = _NO_CURSOR_DQL

    def handle_exception(self, result, dbapi_cursor, err):
        result.connection._handle_dbapi_exception(
            err, None, None, dbapi_cursor, result.context
        )

    def yield_per(self, result, dbapi_cursor, num):
        result.cursor_strategy = BufferedRowCursorFetchStrategy(
            dbapi_cursor,
            {"max_row_buffer": num},
            initial_buffer=collections.deque(),
            growth_factor=0,
        )

    def fetchone(self, result, dbapi_cursor, hard_close=False):
        try:
            row = dbapi_cursor.fetchone()
            if row is None:
                result._soft_close(hard=hard_close)
            return row
        except BaseException as e:
            self.handle_exception(result, dbapi_cursor, e)

    def fetchmany(self, result, dbapi_cursor, size=None):
        try:
            if size is None:
                l = dbapi_cursor.fetchmany()
            else:
                l = dbapi_cursor.fetchmany(size)

            if not l:
                result._soft_close()
            return l
        except BaseException as e:
            self.handle_exception(result, dbapi_cursor, e)

    def fetchall(self, result, dbapi_cursor):
        try:
            rows = dbapi_cursor.fetchall()
            result._soft_close()
            return rows
        except BaseException as e:
            self.handle_exception(result, dbapi_cursor, e)


_DEFAULT_FETCH = CursorFetchStrategy()


class BufferedRowCursorFetchStrategy(CursorFetchStrategy):
    """A cursor fetch strategy with row buffering behavior.

    This strategy buffers the contents of a selection of rows
    before ``fetchone()`` is called.  This is to allow the results of
    ``cursor.description`` to be available immediately, when
    interfacing with a DB-API that requires rows to be consumed before
    this information is available (currently psycopg2, when used with
    server-side cursors).

    The pre-fetching behavior fetches only one row initially, and then
    grows its buffer size by a fixed amount with each successive need
    for additional rows up to the ``max_row_buffer`` size, which defaults
    to 1000::

        with psycopg2_engine.connect() as conn:

            result = conn.execution_options(
                stream_results=True, max_row_buffer=50
            ).execute(text("select * from table"))

    .. versionadded:: 1.4 ``max_row_buffer`` may now exceed 1000 rows.

    .. seealso::

        :ref:`psycopg2_execution_options`
    """

    __slots__ = ("_max_row_buffer", "_rowbuffer", "_bufsize", "_growth_factor")

    def __init__(
        self,
        dbapi_cursor,
        execution_options,
        growth_factor=5,
        initial_buffer=None,
    ):
        self._max_row_buffer = execution_options.get("max_row_buffer", 1000)

        if initial_buffer is not None:
            self._rowbuffer = initial_buffer
        else:
            self._rowbuffer = collections.deque(dbapi_cursor.fetchmany(1))
        self._growth_factor = growth_factor

        if growth_factor:
            self._bufsize = min(self._max_row_buffer, self._growth_factor)
        else:
            self._bufsize = self._max_row_buffer

    @classmethod
    def create(cls, result):
        return BufferedRowCursorFetchStrategy(
            result.cursor,
            result.context.execution_options,
        )

    def _buffer_rows(self, result, dbapi_cursor):
        """this is currently used only by fetchone()."""

        size = self._bufsize
        try:
            if size < 1:
                new_rows = dbapi_cursor.fetchall()
            else:
                new_rows = dbapi_cursor.fetchmany(size)
        except BaseException as e:
            self.handle_exception(result, dbapi_cursor, e)

        if not new_rows:
            return
        self._rowbuffer = collections.deque(new_rows)
        if self._growth_factor and size < self._max_row_buffer:
            self._bufsize = min(
                self._max_row_buffer, size * self._growth_factor
            )
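
    # Worked example (an editorial note, not in the original source): with
    # the default growth_factor of 5 and max_row_buffer of 1000, successive
    # _buffer_rows() calls ask the cursor for 5, 25, 125, 625 rows, and then
    # cap at 1000 rows for every refill thereafter.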

    def yield_per(self, result, dbapi_cursor, num):
        self._growth_factor = 0
        self._max_row_buffer = self._bufsize = num

    def soft_close(self, result, dbapi_cursor):
        self._rowbuffer.clear()
        super(BufferedRowCursorFetchStrategy, self).soft_close(
            result, dbapi_cursor
        )

    def hard_close(self, result, dbapi_cursor):
        self._rowbuffer.clear()
        super(BufferedRowCursorFetchStrategy, self).hard_close(
            result, dbapi_cursor
        )

    def fetchone(self, result, dbapi_cursor, hard_close=False):
        if not self._rowbuffer:
            self._buffer_rows(result, dbapi_cursor)
            if not self._rowbuffer:
                try:
                    result._soft_close(hard=hard_close)
                except BaseException as e:
                    self.handle_exception(result, dbapi_cursor, e)
                return None
        return self._rowbuffer.popleft()

    def fetchmany(self, result, dbapi_cursor, size=None):
        if size is None:
            return self.fetchall(result, dbapi_cursor)

        buf = list(self._rowbuffer)
        lb = len(buf)
        if size > lb:
            try:
                new = dbapi_cursor.fetchmany(size - lb)
            except BaseException as e:
                self.handle_exception(result, dbapi_cursor, e)
            else:
                if not new:
                    result._soft_close()
                else:
                    buf.extend(new)

        result = buf[0:size]
        self._rowbuffer = collections.deque(buf[size:])
        return result

    def fetchall(self, result, dbapi_cursor):
        try:
            ret = list(self._rowbuffer) + list(dbapi_cursor.fetchall())
            self._rowbuffer.clear()
            result._soft_close()
            return ret
        except BaseException as e:
            self.handle_exception(result, dbapi_cursor, e)


class FullyBufferedCursorFetchStrategy(CursorFetchStrategy):
    """A cursor strategy that buffers rows fully upon creation.

    Used for operations where a result is to be delivered
    after the database conversation cannot be continued,
    such as MSSQL INSERT...OUTPUT after an autocommit.

    """

    __slots__ = ("_rowbuffer", "alternate_cursor_description")

    def __init__(
        self, dbapi_cursor, alternate_description=None, initial_buffer=None
    ):
        self.alternate_cursor_description = alternate_description
        if initial_buffer is not None:
            self._rowbuffer = collections.deque(initial_buffer)
        else:
            self._rowbuffer = collections.deque(dbapi_cursor.fetchall())

    def yield_per(self, result, dbapi_cursor, num):
        pass

    def soft_close(self, result, dbapi_cursor):
        self._rowbuffer.clear()
        super(FullyBufferedCursorFetchStrategy, self).soft_close(
            result, dbapi_cursor
        )

    def hard_close(self, result, dbapi_cursor):
        self._rowbuffer.clear()
        super(FullyBufferedCursorFetchStrategy, self).hard_close(
            result, dbapi_cursor
        )

    def fetchone(self, result, dbapi_cursor, hard_close=False):
        if self._rowbuffer:
            return self._rowbuffer.popleft()
        else:
            result._soft_close(hard=hard_close)
            return None

    def fetchmany(self, result, dbapi_cursor, size=None):
        if size is None:
            return self.fetchall(result, dbapi_cursor)

        buf = list(self._rowbuffer)
        rows = buf[0:size]
        self._rowbuffer = collections.deque(buf[size:])
        if not rows:
            result._soft_close()
        return rows

    def fetchall(self, result, dbapi_cursor):
        ret = self._rowbuffer
        self._rowbuffer = collections.deque()
        result._soft_close()
        return ret
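
# Illustrative construction (an editorial sketch, not in the original
# source): a dialect that has already consumed its cursor can hand the
# pre-fetched rows to this strategy, e.g.
#
#     strategy = FullyBufferedCursorFetchStrategy(
#         cursor, alternate_description=desc, initial_buffer=rows
#     )
#
# after which fetchone() / fetchmany() / fetchall() serve only from the
# buffer and never touch the cursor again.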


class _NoResultMetaData(ResultMetaData):
    __slots__ = ()

    returns_rows = False

    def _we_dont_return_rows(self, err=None):
        util.raise_(
            exc.ResourceClosedError(
                "This result object does not return rows. "
                "It has been closed automatically."
            ),
            replace_context=err,
        )

    def _index_for_key(self, keys, raiseerr):
        self._we_dont_return_rows()

    def _metadata_for_keys(self, key):
        self._we_dont_return_rows()

    def _reduce(self, keys):
        self._we_dont_return_rows()

    @property
    def _keymap(self):
        self._we_dont_return_rows()

    @property
    def keys(self):
        self._we_dont_return_rows()


class _LegacyNoResultMetaData(_NoResultMetaData):
    @property
    def keys(self):
        util.warn_deprecated_20(
            "Calling the .keys() method on a result set that does not return "
            "rows is deprecated and will raise ResourceClosedError in "
            "SQLAlchemy 2.0.",
        )
        return []


_NO_RESULT_METADATA = _NoResultMetaData()
_LEGACY_NO_RESULT_METADATA = _LegacyNoResultMetaData()


class BaseCursorResult(object):
    """Base class for database result objects."""

    out_parameters = None
    _metadata = None
    _soft_closed = False
    closed = False

    def __init__(self, context, cursor_strategy, cursor_description):
        self.context = context
        self.dialect = context.dialect
        self.cursor = context.cursor
        self.cursor_strategy = cursor_strategy
        self.connection = context.root_connection
        self._echo = echo = (
            self.connection._echo and context.engine._should_log_debug()
        )

        if cursor_description is not None:
            # inline of Result._row_getter(), set up an initial row
            # getter assuming no transformations will be called as this
            # is the most common case

            if echo:
                log = self.context.connection._log_debug

                def log_row(row):
                    log("Row %r", sql_util._repr_row(row))
                    return row

                self._row_logging_fn = log_row
            else:
                log_row = None

            metadata = self._init_metadata(context, cursor_description)

            keymap = metadata._keymap
            processors = metadata._processors
            process_row = self._process_row
            key_style = process_row._default_key_style
            _make_row = functools.partial(
                process_row, metadata, processors, keymap, key_style
            )
            if log_row:

                def make_row(row):
                    made_row = _make_row(row)
                    log_row(made_row)
                    return made_row

            else:
                make_row = _make_row
            self._set_memoized_attribute("_row_getter", make_row)

        else:
            self._metadata = self._no_result_metadata

    def _init_metadata(self, context, cursor_description):

        if context.compiled:
            if context.compiled._cached_metadata:
                metadata = self.context.compiled._cached_metadata
            else:
                metadata = self._cursor_metadata(self, cursor_description)
                if metadata._safe_for_cache:
                    context.compiled._cached_metadata = metadata

            # result rewrite / adapt step.  when we are invoked against a
            # cached Compiled object, we want to rewrite the ResultMetaData
            # to reflect the Column objects that are in our current SQL
            # statement object, not the one that is associated with the
            # cached Compiled object.
            # the Compiled object may also tell us to not
            # actually do this step; this is to support the ORM where
            # it is to produce a new Result object in any case, and will
            # be using the cached Column objects against this database result
            # so we don't want to rewrite them.
            #
            # Basically this step suits the use case where the end user
            # is using Core SQL expressions and is accessing columns in the
            # result row using row._mapping[table.c.column].
            compiled = context.compiled
            if (
                compiled
                and compiled._result_columns
                and context.cache_hit is context.dialect.CACHE_HIT
                and not context.execution_options.get(
                    "_result_disable_adapt_to_context", False
                )
                and compiled.statement is not context.invoked_statement
            ):
                metadata = metadata._adapt_to_context(context)

            self._metadata = metadata

        else:
            self._metadata = metadata = self._cursor_metadata(
                self, cursor_description
            )
        if self._echo:
            context.connection._log_debug(
                "Col %r", tuple(x[0] for x in cursor_description)
            )
        return metadata

    def _soft_close(self, hard=False):
        """Soft close this :class:`_engine.CursorResult`.

        This releases all DBAPI cursor resources, but leaves the
        CursorResult "open" from a semantic perspective, meaning the
        fetchXXX() methods will continue to return empty results.

        This method is called automatically when:

        * all result rows are exhausted using the fetchXXX() methods.
        * cursor.description is None.

        This method is **not public**, but is documented in order to clarify
        the "autoclose" process used.

        .. versionadded:: 1.0.0

        .. seealso::

            :meth:`_engine.CursorResult.close`

        """
        if (not hard and self._soft_closed) or (hard and self.closed):
            return

        if hard:
            self.closed = True
            self.cursor_strategy.hard_close(self, self.cursor)
        else:
            self.cursor_strategy.soft_close(self, self.cursor)

        if not self._soft_closed:
            cursor = self.cursor
            self.cursor = None
            self.connection._safe_close_cursor(cursor)
            self._soft_closed = True

    @property
    def inserted_primary_key_rows(self):
        """Return the value of
        :attr:`_engine.CursorResult.inserted_primary_key`
        as a row contained within a list; some dialects may support a
        multiple row form as well.

        .. note:: As indicated below, in current SQLAlchemy versions this
           accessor is only useful beyond what's already supplied by
           :attr:`_engine.CursorResult.inserted_primary_key` when using the
           :ref:`postgresql_psycopg2` dialect.  Future versions hope to
           generalize this feature to more dialects.

        This accessor is added to support dialects that offer the feature
        that is currently implemented by the :ref:`psycopg2_executemany_mode`
        feature, currently **only the psycopg2 dialect**, which provides
        for many rows to be INSERTed at once while still retaining the
        behavior of being able to return server-generated primary key values.

        * **When using the psycopg2 dialect, or other dialects that may
          support "fast executemany" style inserts in upcoming releases**:
          When invoking an INSERT statement while passing a list of rows as
          the second argument to :meth:`_engine.Connection.execute`, this
          accessor will then provide a list of rows, where each row contains
          the primary key value for each row that was INSERTed.

        * **When using all other dialects / backends that don't yet support
          this feature**: This accessor is only useful for **single row INSERT
          statements**, and returns the same information as that of the
          :attr:`_engine.CursorResult.inserted_primary_key` within a
          single-element list.  When an INSERT statement is executed in
          conjunction with a list of rows to be INSERTed, the list will contain
          one row per row inserted in the statement, however it will contain
          ``None`` for any server-generated values.

        Future releases of SQLAlchemy will further generalize the
        "fast execution helper" feature of psycopg2 to suit other dialects,
        thus allowing this accessor to be of more general use.

        .. versionadded:: 1.4

        .. seealso::

            :attr:`_engine.CursorResult.inserted_primary_key`

        """
        if not self.context.compiled:
            raise exc.InvalidRequestError(
                "Statement is not a compiled expression construct."
            )
        elif not self.context.isinsert:
            raise exc.InvalidRequestError(
                "Statement is not an insert() expression construct."
            )
        elif self.context._is_explicit_returning:
            raise exc.InvalidRequestError(
                "Can't call inserted_primary_key "
                "when returning() is used."
            )
        return self.context.inserted_primary_key_rows

    @property
    def inserted_primary_key(self):
        """Return the primary key for the row just inserted.

        The return value is a :class:`_result.Row` object representing
        a named tuple of primary key values in the order in which the
        primary key columns are configured in the source
        :class:`_schema.Table`.

        .. versionchanged:: 1.4.8 - the
           :attr:`_engine.CursorResult.inserted_primary_key`
           value is now a named tuple via the :class:`_result.Row` class,
           rather than a plain tuple.

        This accessor only applies to single row :func:`_expression.insert`
        constructs which did not explicitly specify
        :meth:`_expression.Insert.returning`.  Support for multirow inserts,
        while not yet available for most backends, would be accessed using
        the :attr:`_engine.CursorResult.inserted_primary_key_rows` accessor.

        Note that primary key columns which specify a server_default clause,
        or otherwise do not qualify as "autoincrement" columns (see the notes
        at :class:`_schema.Column`), and were generated using the
        database-side default, will appear in this list as ``None`` unless
        the backend supports "returning" and the insert statement executed
        with the "implicit returning" enabled.

        Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
        statement is not a compiled expression construct
        or is not an insert() construct.

        """

        if self.context.executemany:
            raise exc.InvalidRequestError(
                "This statement was an executemany call; if primary key "
                "returning is supported, please "
                "use .inserted_primary_key_rows."
            )

        ikp = self.inserted_primary_key_rows
        if ikp:
            return ikp[0]
        else:
            return None
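
    # Illustrative usage (an editorial sketch, not part of the original
    # source), assuming a table with an autoincrementing "id" column:
    #
    #     result = conn.execute(table.insert().values(name="x"))
    #     pk_row = result.inserted_primary_key  # e.g. (1,) as a Row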

    def last_updated_params(self):
        """Return the collection of updated parameters from this
        execution.

        Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
        statement is not a compiled expression construct
        or is not an update() construct.

        """
        if not self.context.compiled:
            raise exc.InvalidRequestError(
                "Statement is not a compiled expression construct."
            )
        elif not self.context.isupdate:
            raise exc.InvalidRequestError(
                "Statement is not an update() expression construct."
            )
        elif self.context.executemany:
            return self.context.compiled_parameters
        else:
            return self.context.compiled_parameters[0]

    def last_inserted_params(self):
        """Return the collection of inserted parameters from this
        execution.

        Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
        statement is not a compiled expression construct
        or is not an insert() construct.

        """
        if not self.context.compiled:
            raise exc.InvalidRequestError(
                "Statement is not a compiled expression construct."
            )
        elif not self.context.isinsert:
            raise exc.InvalidRequestError(
                "Statement is not an insert() expression construct."
            )
        elif self.context.executemany:
            return self.context.compiled_parameters
        else:
            return self.context.compiled_parameters[0]

    @property
    def returned_defaults_rows(self):
        """Return a list of rows each containing the values of default
        columns that were fetched using
        the :meth:`.ValuesBase.return_defaults` feature.

        The return value is a list of :class:`.Row` objects.

        .. versionadded:: 1.4

        """
        return self.context.returned_default_rows

    @property
    def returned_defaults(self):
        """Return the values of default columns that were fetched using
        the :meth:`.ValuesBase.return_defaults` feature.

        The value is an instance of :class:`.Row`, or ``None``
        if :meth:`.ValuesBase.return_defaults` was not used or if the
        backend does not support RETURNING.

        .. versionadded:: 0.9.0

        .. seealso::

            :meth:`.ValuesBase.return_defaults`

        """

        if self.context.executemany:
            raise exc.InvalidRequestError(
                "This statement was an executemany call; if return defaults "
                "is supported, please use .returned_defaults_rows."
            )

        rows = self.context.returned_default_rows
        if rows:
            return rows[0]
        else:
            return None

    def lastrow_has_defaults(self):
        """Return ``lastrow_has_defaults()`` from the underlying
        :class:`.ExecutionContext`.

        See :class:`.ExecutionContext` for details.

        """

        return self.context.lastrow_has_defaults()

    def postfetch_cols(self):
        """Return ``postfetch_cols()`` from the underlying
        :class:`.ExecutionContext`.

        See :class:`.ExecutionContext` for details.

        Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
        statement is not a compiled expression construct
        or is not an insert() or update() construct.

        """

        if not self.context.compiled:
            raise exc.InvalidRequestError(
                "Statement is not a compiled expression construct."
            )
        elif not self.context.isinsert and not self.context.isupdate:
            raise exc.InvalidRequestError(
                "Statement is not an insert() or update() "
                "expression construct."
            )
        return self.context.postfetch_cols

    def prefetch_cols(self):
        """Return ``prefetch_cols()`` from the underlying
        :class:`.ExecutionContext`.

        See :class:`.ExecutionContext` for details.

        Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
        statement is not a compiled expression construct
        or is not an insert() or update() construct.

        """

        if not self.context.compiled:
            raise exc.InvalidRequestError(
                "Statement is not a compiled expression construct."
            )
        elif not self.context.isinsert and not self.context.isupdate:
            raise exc.InvalidRequestError(
                "Statement is not an insert() or update() "
                "expression construct."
            )
        return self.context.prefetch_cols

    def supports_sane_rowcount(self):
        """Return ``supports_sane_rowcount`` from the dialect.

        See :attr:`_engine.CursorResult.rowcount` for background.

        """

        return self.dialect.supports_sane_rowcount

    def supports_sane_multi_rowcount(self):
        """Return ``supports_sane_multi_rowcount`` from the dialect.

        See :attr:`_engine.CursorResult.rowcount` for background.

        """

        return self.dialect.supports_sane_multi_rowcount

    @util.memoized_property
    def rowcount(self):
        """Return the 'rowcount' for this result.

        The 'rowcount' reports the number of rows *matched*
        by the WHERE criterion of an UPDATE or DELETE statement.

        .. note::

           Notes regarding :attr:`_engine.CursorResult.rowcount`:

           * This attribute returns the number of rows *matched*,
             which is not necessarily the same as the number of rows
             that were actually *modified* - an UPDATE statement, for example,
             may have no net change on a given row if the SET values
             given are the same as those present in the row already.
             Such a row would be matched but not modified.
             On backends that feature both styles, such as MySQL,
             rowcount is configured by default to return the match
             count in all cases.

           * :attr:`_engine.CursorResult.rowcount`
             is *only* useful in conjunction
             with an UPDATE or DELETE statement.  Contrary to what the Python
             DBAPI says, it does *not* return the
             number of rows available from the results of a SELECT statement
             as DBAPIs cannot support this functionality when rows are
             unbuffered.

           * :attr:`_engine.CursorResult.rowcount`
             may not be fully implemented by
             all dialects.  In particular, most DBAPIs do not support an
             aggregate rowcount result from an executemany call.
             The :meth:`_engine.CursorResult.supports_sane_rowcount` and
             :meth:`_engine.CursorResult.supports_sane_multi_rowcount` methods
             will report from the dialect if each usage is known to be
             supported.

           * Statements that use RETURNING may not return a correct
             rowcount.

        .. seealso::

            :ref:`tutorial_update_delete_rowcount` - in the :ref:`unified_tutorial`

        """  # noqa: E501

        try:
            return self.context.rowcount
        except BaseException as e:
            self.cursor_strategy.handle_exception(self, self.cursor, e)

    @property
    def lastrowid(self):
        """Return the 'lastrowid' accessor on the DBAPI cursor.

        This is a DBAPI specific method and is only functional
        for those backends which support it, for statements
        where it is appropriate.  Its behavior is not
        consistent across backends.

        Usage of this method is normally unnecessary when
        using insert() expression constructs; the
        :attr:`~CursorResult.inserted_primary_key` attribute provides a
        tuple of primary key values for a newly inserted row,
        regardless of database backend.

        """
        try:
            return self.context.get_lastrowid()
        except BaseException as e:
            self.cursor_strategy.handle_exception(self, self.cursor, e)

    @property
    def returns_rows(self):
        """True if this :class:`_engine.CursorResult` returns zero or more
        rows.

        I.e. if it is legal to call the methods
        :meth:`_engine.CursorResult.fetchone`,
        :meth:`_engine.CursorResult.fetchmany`, and
        :meth:`_engine.CursorResult.fetchall`.

        Overall, the value of :attr:`_engine.CursorResult.returns_rows` should
        always be synonymous with whether or not the DBAPI cursor had a
        ``.description`` attribute, indicating the presence of result columns,
        noting that a cursor that returns zero rows still has a
        ``.description`` if a row-returning statement was emitted.

        This attribute should be True for all results that are against
        SELECT statements, as well as for DML statements INSERT/UPDATE/DELETE
        that use RETURNING.  For INSERT/UPDATE/DELETE statements that were
        not using RETURNING, the value will usually be False, however
        there are some dialect-specific exceptions to this, such as when
        using the MSSQL / pyodbc dialect a SELECT is emitted inline in
        order to retrieve an inserted primary key value.

        """
        return self._metadata.returns_rows

    @property
    def is_insert(self):
        """True if this :class:`_engine.CursorResult` is the result
        of executing an expression language compiled
        :func:`_expression.insert` construct.

        When True, this implies that the
        :attr:`inserted_primary_key` attribute is accessible,
        assuming the statement did not include
        a user defined "returning" construct.

        """
        return self.context.isinsert


class CursorResult(BaseCursorResult, Result):
    """A Result that represents state from a DBAPI cursor.

    .. versionchanged:: 1.4  The :class:`.CursorResult` and
       :class:`.LegacyCursorResult`
       classes replace the previous :class:`.ResultProxy` interface.
       These classes are based on the :class:`.Result` calling API
       which provides an updated usage model and calling facade for
       SQLAlchemy Core and SQLAlchemy ORM.

    Returns database rows via the :class:`.Row` class, which provides
    additional API features and behaviors on top of the raw data returned by
    the DBAPI.  Through the use of filters such as the :meth:`.Result.scalars`
    method, other kinds of objects may also be returned.

    Within the scope of the 1.x series of SQLAlchemy, Core SQL results in
    version 1.4 return an instance of :class:`._engine.LegacyCursorResult`
    which takes the place of the ``CursorResult`` class used for the 1.3
    series and previously.  This object returns rows as :class:`.LegacyRow`
    objects, which maintains Python mapping (i.e. dictionary) like behaviors
    upon the object itself.  Going forward, the :attr:`.Row._mapping`
    attribute should be used for dictionary behaviors.

    .. seealso::

        :ref:`coretutorial_selecting` - introductory material for accessing
        :class:`_engine.CursorResult` and :class:`.Row` objects.

    """

    _cursor_metadata = CursorResultMetaData
    _cursor_strategy_cls = CursorFetchStrategy
    _no_result_metadata = _NO_RESULT_METADATA
    _is_cursor = True

    def _fetchiter_impl(self):
        fetchone = self.cursor_strategy.fetchone

        while True:
            row = fetchone(self, self.cursor)
            if row is None:
                break
            yield row

    def _fetchone_impl(self, hard_close=False):
        return self.cursor_strategy.fetchone(self, self.cursor, hard_close)

    def _fetchall_impl(self):
        return self.cursor_strategy.fetchall(self, self.cursor)

    def _fetchmany_impl(self, size=None):
        return self.cursor_strategy.fetchmany(self, self.cursor, size)

    def _raw_row_iterator(self):
        return self._fetchiter_impl()

    def merge(self, *others):
        merged_result = super(CursorResult, self).merge(*others)
        setup_rowcounts = not self._metadata.returns_rows
        if setup_rowcounts:
            merged_result.rowcount = sum(
                result.rowcount for result in (self,) + others
            )
        return merged_result
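
    # Editorial note (not in the original source): for row-less DML results,
    # merge() sums rowcount across the merged results, e.g.
    # r1.merge(r2).rowcount == r1.rowcount + r2.rowcount.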

    def close(self):
        """Close this :class:`_engine.CursorResult`.

        This closes out the underlying DBAPI cursor corresponding to the
        statement execution, if one is still present.  Note that the DBAPI
        cursor is automatically released when the
        :class:`_engine.CursorResult` exhausts all available rows.
        :meth:`_engine.CursorResult.close` is generally an optional method
        except in the case when discarding a :class:`_engine.CursorResult`
        that still has additional rows pending for fetch.

        After this method is called, it is no longer valid to call upon
        the fetch methods, which will raise a :class:`.ResourceClosedError`
        on subsequent use.

        .. seealso::

            :ref:`connections_toplevel`

        """
        self._soft_close(hard=True)

    @_generative
    def yield_per(self, num):
        self._yield_per = num
        self.cursor_strategy.yield_per(self, self.cursor, num)
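
    # Illustrative usage (an editorial sketch, not in the original source):
    # calling result.yield_per(100) swaps the active strategy for a
    # BufferedRowCursorFetchStrategy with a fixed 100-row buffer, so
    # iteration pulls at most 100 rows from the DBAPI cursor at a time.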


class LegacyCursorResult(CursorResult):
    """Legacy version of :class:`.CursorResult`.

    This class includes connection "autoclose" behavior for use with
    "connectionless" execution, and delivers rows using the
    :class:`.LegacyRow` row implementation.

    .. versionadded:: 1.4

    """

    _autoclose_connection = False
    _process_row = LegacyRow
    _cursor_metadata = LegacyCursorResultMetaData
    _cursor_strategy_cls = CursorFetchStrategy

    _no_result_metadata = _LEGACY_NO_RESULT_METADATA

    def close(self):
        """Close this :class:`_engine.LegacyCursorResult`.

        This method has the same behavior as that of
        :meth:`._engine.CursorResult`, but it also may close
        the underlying :class:`.Connection` for the case of "connectionless"
        execution.

        .. deprecated:: 2.0 "connectionless" execution is deprecated and will
           be removed in version 2.0.  Version 2.0 will feature the
           :class:`_future.Result`
           object that will no longer affect the status
           of the originating connection in any case.

        After this method is called, it is no longer valid to call upon
        the fetch methods, which will raise a :class:`.ResourceClosedError`
        on subsequent use.

        .. seealso::

            :ref:`connections_toplevel`

            :ref:`dbengine_implicit`
        """
        self._soft_close(hard=True)

    def _soft_close(self, hard=False):
        soft_closed = self._soft_closed
        super(LegacyCursorResult, self)._soft_close(hard=hard)
        if (
            not soft_closed
            and self._soft_closed
            and self._autoclose_connection
        ):
            self.connection.close()


ResultProxy = LegacyCursorResult


class BufferedRowResultProxy(ResultProxy):
    """A ResultProxy with row buffering behavior.

    .. deprecated:: 1.4 this class is now supplied using a strategy object.
       See :class:`.BufferedRowCursorFetchStrategy`.

    """

    _cursor_strategy_cls = BufferedRowCursorFetchStrategy


class FullyBufferedResultProxy(ResultProxy):
    """A result proxy that buffers rows fully upon creation.

    .. deprecated:: 1.4 this class is now supplied using a strategy object.
       See :class:`.FullyBufferedCursorFetchStrategy`.

    """

    _cursor_strategy_cls = FullyBufferedCursorFetchStrategy


class BufferedColumnRow(LegacyRow):
    """Row is now BufferedColumn in all cases"""


class BufferedColumnResultProxy(ResultProxy):
    """A ResultProxy with column buffering behavior.

    .. versionchanged:: 1.4  This is now the default behavior of the Row
       and this class does not change behavior in any way.

    """

    _process_row = BufferedColumnRow