Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/jedi/inference/syntax_tree.py: 11%

Shortcuts on this page

r m x   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

575 statements  

1""" 

2Functions inferring the syntax tree. 

3""" 

4import copy 

5import itertools 

6 

7from parso.python import tree 

8 

9from jedi import debug 

10from jedi import parser_utils 

11from jedi.inference.base_value import ValueSet, NO_VALUES, ContextualizedNode, \ 

12 iterator_to_value_set, iterate_values 

13from jedi.inference.lazy_value import LazyTreeValue 

14from jedi.inference import compiled 

15from jedi.inference import recursion 

16from jedi.inference import analysis 

17from jedi.inference import imports 

18from jedi.inference import arguments 

19from jedi.inference.value import ClassValue, FunctionValue 

20from jedi.inference.value import iterable 

21from jedi.inference.value.dynamic_arrays import ListModification, DictModification 

22from jedi.inference.value import TreeInstance 

23from jedi.inference.helpers import is_string, is_literal, is_number, \ 

24 get_names_of_node, is_big_annoying_library 

25from jedi.inference.compiled.access import COMPARISON_OPERATORS 

26from jedi.inference.cache import inference_state_method_cache 

27from jedi.inference.gradual.stub_value import VersionInfo 

28from jedi.inference.gradual import annotation 

29from jedi.inference.names import TreeNameDefinition 

30from jedi.inference.context import CompForContext 

31from jedi.inference.value.decorator import Decoratee 

32from jedi.plugins import plugin_manager 

33 

# Maps a binary Python operator to the "magic" dunder method that a class
# implements to overload it (e.g. ``a + b`` -> ``a.__add__(b)``).
operator_to_magic_method = {
    '+': '__add__',
    '-': '__sub__',
    '*': '__mul__',
    '@': '__matmul__',
    '/': '__truediv__',
    '//': '__floordiv__',
    '%': '__mod__',
    '**': '__pow__',
    '<<': '__lshift__',
    '>>': '__rshift__',
    '&': '__and__',
    '|': '__or__',
    '^': '__xor__',
}

# The reflected counterparts: ``__add__`` -> ``__radd__`` and so on, used
# when the left operand does not implement the operation itself.
reverse_operator_to_magic_method = {
    operator: '__r' + method[2:]
    for operator, method in operator_to_magic_method.items()
}

53 

54 

55def _limit_value_infers(func): 

56 """ 

57 This is for now the way how we limit type inference going wild. There are 

58 other ways to ensure recursion limits as well. This is mostly necessary 

59 because of instance (self) access that can be quite tricky to limit. 

60 

61 I'm still not sure this is the way to go, but it looks okay for now and we 

62 can still go anther way in the future. Tests are there. ~ dave 

63 """ 

64 def wrapper(context, *args, **kwargs): 

65 n = context.tree_node 

66 inference_state = context.inference_state 

67 try: 

68 inference_state.inferred_element_counts[n] += 1 

69 maximum = 300 

70 if context.parent_context is None \ 

71 and context.get_value() is inference_state.builtins_module: 

72 # Builtins should have a more generous inference limit. 

73 # It is important that builtins can be executed, otherwise some 

74 # functions that depend on certain builtins features would be 

75 # broken, see e.g. GH #1432 

76 maximum *= 100 

77 

78 if inference_state.inferred_element_counts[n] > maximum: 

79 debug.warning('In value %s there were too many inferences.', n) 

80 return NO_VALUES 

81 except KeyError: 

82 inference_state.inferred_element_counts[n] = 1 

83 return func(context, *args, **kwargs) 

84 

85 return wrapper 

86 

87 

def infer_node(context, element):
    """
    Infer the value set of an arbitrary parse-tree node within ``context``.

    This is the flow-sensitive entry point: before delegating to
    ``_infer_node`` it checks whether ``element`` lives inside an
    ``if``/``for`` statement whose test narrows some names.  If so, it infers
    the element once per possible branch binding and unions the results.
    """
    if isinstance(context, CompForContext):
        # Comprehension contexts are synthetic and never carry predefined
        # (branch-narrowed) names, so infer directly.
        return _infer_node(context, element)

    # Find the closest enclosing if/for statement; give up at scope borders.
    if_stmt = element
    while if_stmt is not None:
        if_stmt = if_stmt.parent
        if if_stmt.type in ('if_stmt', 'for_stmt'):
            break
        if parser_utils.is_scope(if_stmt):
            if_stmt = None
            break
    predefined_if_name_dict = context.predefined_names.get(if_stmt)
    # TODO there's a lot of issues with this one. We actually should do
    # this in a different way. Caching should only be active in certain
    # cases and this all sucks.
    if predefined_if_name_dict is None and if_stmt \
            and if_stmt.type == 'if_stmt' and context.inference_state.is_analysis:
        if_stmt_test = if_stmt.children[1]
        name_dicts = [{}]
        # If we already did a check, we don't want to do it again -> If
        # value.predefined_names is filled, we stop.
        # We don't want to check the if stmt itself, it's just about
        # the content.
        if element.start_pos > if_stmt_test.end_pos:
            # Now we need to check if the names in the if_stmt match the
            # names in the suite.
            if_names = get_names_of_node(if_stmt_test)
            element_names = get_names_of_node(element)
            str_element_names = [e.value for e in element_names]
            if any(i.value in str_element_names for i in if_names):
                for if_name in if_names:
                    definitions = context.inference_state.infer(context, if_name)
                    # Every name that has multiple different definitions
                    # causes the complexity to rise. The complexity should
                    # never fall below 1.
                    if len(definitions) > 1:
                        if len(name_dicts) * len(definitions) > 16:
                            debug.dbg('Too many options for if branch inference %s.', if_stmt)
                            # There's only a certain amount of branches
                            # Jedi can infer, otherwise it will take to
                            # long.
                            name_dicts = [{}]
                            break

                        # Cartesian product: each existing branch dict is
                        # copied once per definition of this name.
                        original_name_dicts = list(name_dicts)
                        name_dicts = []
                        for definition in definitions:
                            new_name_dicts = list(original_name_dicts)
                            for i, name_dict in enumerate(new_name_dicts):
                                new_name_dicts[i] = name_dict.copy()
                                new_name_dicts[i][if_name.value] = ValueSet([definition])

                            name_dicts += new_name_dicts
                    else:
                        for name_dict in name_dicts:
                            name_dict[if_name.value] = definitions
        if len(name_dicts) > 1:
            # Infer once per branch combination and union the results.
            result = NO_VALUES
            for name_dict in name_dicts:
                with context.predefine_names(if_stmt, name_dict):
                    result |= _infer_node(context, element)
            return result
        else:
            return _infer_node_if_inferred(context, element)
    else:
        if predefined_if_name_dict:
            # A branch binding is currently active; caching must be bypassed.
            return _infer_node(context, element)
        else:
            return _infer_node_if_inferred(context, element)

158 

159 

def _infer_node_if_inferred(context, element):
    """
    TODO This function is temporary: Merge with infer_node.
    """
    # Walk all ancestors: as soon as one of them carries predefined
    # (flow-narrowed) names, caching would be wrong, so infer directly.
    node = element
    while True:
        node = node.parent
        if node is None:
            return _infer_node_cached(context, element)
        if context.predefined_names.get(node) is not None:
            return _infer_node(context, element)

171 

172 

@inference_state_method_cache(default=NO_VALUES)
def _infer_node_cached(context, element):
    # Cached variant of _infer_node. Only safe when no predefined
    # (flow-dependent) names apply; _infer_node_if_inferred checks that
    # before dispatching here.
    return _infer_node(context, element)

176 

177 

@debug.increase_indent
@_limit_value_infers
def _infer_node(context, element):
    """
    Infer a value set for ``element`` by dispatching on its parse-tree node
    type.  Unknown node types fall through to ``infer_or_test`` (operator
    chains).
    """
    debug.dbg('infer_node %s@%s in %s', element, element.start_pos, context)
    inference_state = context.inference_state
    typ = element.type
    if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword', 'fstring'):
        return infer_atom(context, element)
    elif typ == 'lambdef':
        return ValueSet([FunctionValue.from_context(context, element)])
    elif typ == 'expr_stmt':
        return infer_expr_stmt(context, element)
    elif typ in ('power', 'atom_expr'):
        first_child = element.children[0]
        children = element.children[1:]
        had_await = False
        if first_child.type == 'keyword' and first_child.value == 'await':
            had_await = True
            first_child = children.pop(0)

        # Infer the base, then fold every trailer (call/attr/subscript)
        # into the running value set.
        value_set = context.infer_node(first_child)
        for (i, trailer) in enumerate(children):
            if trailer == '**':  # has a power operation.
                right = context.infer_node(children[i + 1])
                value_set = _infer_comparison(
                    context,
                    value_set,
                    trailer,
                    right
                )
                break
            value_set = infer_trailer(context, value_set, trailer)

        if had_await:
            return value_set.py__await__().py__stop_iteration_returns()
        return value_set
    elif typ in ('testlist_star_expr', 'testlist',):
        # The implicit tuple in statements.
        return ValueSet([iterable.SequenceLiteralValue(inference_state, context, element)])
    elif typ in ('not_test', 'factor'):
        # Unary prefixes (`not`, `-`, `+`, `~`), applied right-to-left.
        value_set = context.infer_node(element.children[-1])
        for operator in element.children[:-1]:
            value_set = infer_factor(value_set, operator)
        return value_set
    elif typ == 'test':
        # `x if foo else y` case.
        return (context.infer_node(element.children[0])
                | context.infer_node(element.children[-1]))
    elif typ == 'operator':
        # Must be an ellipsis, other operators are not inferred.
        if element.value != '...':
            origin = element.parent
            raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin))
        return ValueSet([compiled.builtin_from_name(inference_state, 'Ellipsis')])
    elif typ == 'dotted_name':
        # e.g. `a.b.c` in import statements: resolve attribute by attribute.
        value_set = infer_atom(context, element.children[0])
        for next_name in element.children[2::2]:
            value_set = value_set.py__getattribute__(next_name, name_context=context)
        return value_set
    elif typ == 'eval_input':
        return context.infer_node(element.children[0])
    elif typ == 'annassign':
        return annotation.infer_annotation(context, element.children[1]) \
            .execute_annotation()
    elif typ == 'yield_expr':
        if len(element.children) and element.children[1].type == 'yield_arg':
            # Implies that it's a yield from.
            element = element.children[1].children[1]
            generators = context.infer_node(element) \
                .py__getattribute__('__iter__').execute_with_values()
            return generators.py__stop_iteration_returns()

        # Generator.send() is not implemented.
        return NO_VALUES
    elif typ == 'namedexpr_test':
        # Walrus operator: the value of `(x := expr)` is the value of expr.
        return context.infer_node(element.children[2])
    else:
        return infer_or_test(context, element)

256 

257 

def infer_trailer(context, atom_values, trailer):
    """
    Apply a single trailer node -- ``[...]`` subscript, ``.name`` attribute
    access or ``(...)`` call -- to the values in ``atom_values``.
    """
    trailer_op, node = trailer.children[:2]
    if node == ')':  # `arglist` is optional.
        node = None

    if trailer_op == '[':
        trailer_op, node, _ = trailer.children
        return atom_values.get_item(
            _infer_subscript_list(context, node),
            ContextualizedNode(context, trailer)
        )

    debug.dbg('infer_trailer: %s in %s', trailer, atom_values)
    if trailer_op == '.':
        return atom_values.py__getattribute__(
            name_context=context,
            name_or_str=node
        )
    assert trailer_op == '(', 'trailer_op is actually %s' % trailer_op
    args = arguments.TreeArguments(context.inference_state, context, node, trailer)
    return atom_values.execute(args)

280 

281 

def infer_atom(context, atom):
    """
    Basically to process ``atom`` nodes. The parser sometimes doesn't
    generate the node (because it has just one child). In that case an atom
    might be a name or a literal as well.
    """
    state = context.inference_state
    if atom.type == 'name':
        # This is the first global lookup.
        # The lookup position is the start of the statement the name lives
        # in, so names defined later in the statement are not visible.
        stmt = tree.search_ancestor(atom, 'expr_stmt', 'lambdef', 'if_stmt') or atom
        if stmt.type == 'if_stmt':
            # Only names inside the if-test itself use the if_stmt position.
            if not any(n.start_pos <= atom.start_pos < n.end_pos for n in stmt.get_test_nodes()):
                stmt = atom
        elif stmt.type == 'lambdef':
            stmt = atom
        position = stmt.start_pos
        if _is_annotation_name(atom):
            # Since Python 3.7 (with from __future__ import annotations),
            # annotations are essentially strings and can reference objects
            # that are defined further down in code. Therefore just set the
            # position to None, so the finder will not try to stop at a certain
            # position in the module.
            position = None
        return context.py__getattribute__(atom, position=position)
    elif atom.type == 'keyword':
        # For False/True/None
        if atom.value in ('False', 'True', 'None'):
            return ValueSet([compiled.builtin_from_name(state, atom.value)])
        elif atom.value == 'yield':
            # Contrary to yield from, yield can just appear alone to return a
            # value when used with `.send()`.
            return NO_VALUES
        assert False, 'Cannot infer the keyword %s' % atom

    elif isinstance(atom, tree.Literal):
        string = state.compiled_subprocess.safe_literal_eval(atom.value)
        return ValueSet([compiled.create_simple_object(state, string)])
    elif atom.type == 'strings':
        # Will be multiple string.
        value_set = infer_atom(context, atom.children[0])
        for string in atom.children[1:]:
            right = infer_atom(context, string)
            value_set = _infer_comparison(context, value_set, '+', right)
        return value_set
    elif atom.type == 'fstring':
        return compiled.get_string_value_set(state)
    else:
        c = atom.children
        # Parentheses without commas are not tuples.
        if c[0] == '(' and not len(c) == 2 \
                and not (c[1].type == 'testlist_comp'
                         and len(c[1].children) > 1):
            return context.infer_node(c[1])

        try:
            comp_for = c[1].children[1]
        except (IndexError, AttributeError):
            pass
        else:
            if comp_for == ':':
                # Dict comprehensions have a colon at the 3rd index.
                try:
                    comp_for = c[1].children[3]
                except IndexError:
                    pass

            if comp_for.type in ('comp_for', 'sync_comp_for'):
                return ValueSet([iterable.comprehension_from_atom(
                    state, context, atom
                )])

        # It's a dict/list/tuple literal.
        array_node = c[1]
        try:
            array_node_c = array_node.children
        except AttributeError:
            array_node_c = []
        # `{}` or `{k: v}` or `{**d}` is a dict; everything else (including
        # `{a, b}` set literals) is handled as a sequence literal.
        if c[0] == '{' and (array_node == '}' or ':' in array_node_c
                            or '**' in array_node_c):
            new_value = iterable.DictLiteralValue(state, context, atom)
        else:
            new_value = iterable.SequenceLiteralValue(state, context, atom)
        return ValueSet([new_value])

365 

366 

@_limit_value_infers
def infer_expr_stmt(context, stmt, seek_name=None):
    """
    Infer an ``expr_stmt`` node, guarded against runaway recursion.
    Returns ``NO_VALUES`` when the recursion detector forbids execution.
    """
    with recursion.execution_allowed(context.inference_state, stmt) as allowed:
        if not allowed:
            return NO_VALUES
        if seek_name is not None:
            # A `# type: ...` comment hint takes precedence over inference.
            pep0484_values = \
                annotation.find_type_from_comment_hint_assign(context, stmt, seek_name)
            if pep0484_values:
                return pep0484_values

        return _infer_expr_stmt(context, stmt, seek_name)

379 

380 

@debug.increase_indent
def _infer_expr_stmt(context, stmt, seek_name=None):
    """
    The starting point of the completion. A statement always owns a call
    list, which are the calls, that a statement does. In case multiple
    names are defined in the statement, `seek_name` returns the result for
    this name.

    expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
                     ('=' (yield_expr|testlist_star_expr))*)
    annassign: ':' test ['=' test]
    augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
                '<<=' | '>>=' | '**=' | '//=')

    :param stmt: A `tree.ExprStmt`.
    """
    def check_setitem(stmt):
        # Detects `name[...] = value` statements; returns (is_setitem,
        # subscript node or None).
        atom_expr = stmt.children[0]
        if atom_expr.type not in ('atom_expr', 'power'):
            return False, None
        name = atom_expr.children[0]
        if name.type != 'name' or len(atom_expr.children) != 2:
            return False, None
        trailer = atom_expr.children[-1]
        return trailer.children[0] == '[', trailer.children[1]

    debug.dbg('infer_expr_stmt %s (%s)', stmt, seek_name)
    rhs = stmt.get_rhs()

    value_set = context.infer_node(rhs)

    if seek_name:
        n = TreeNameDefinition(context, seek_name)
        value_set = check_tuple_assignments(n, value_set)

    first_operator = next(stmt.yield_operators(), None)
    is_setitem, subscriptlist = check_setitem(stmt)
    # NOTE(review): despite the name, this flags *augmented* assignments
    # (`+=` etc.) -- any operator that is neither a plain `=` nor absent.
    is_annassign = first_operator not in ('=', None) and first_operator.type == 'operator'
    if is_annassign or is_setitem:
        # `=` is always the last character in aug assignments -> -1
        name = stmt.get_defined_names(include_setitem=True)[0].value
        left_values = context.py__getattribute__(name, position=stmt.start_pos)

        if is_setitem:
            def to_mod(v):
                # Wrap mutated containers so later reads see the new item.
                c = ContextualizedSubscriptListNode(context, subscriptlist)
                if v.array_type == 'dict':
                    return DictModification(v, value_set, c)
                elif v.array_type == 'list':
                    return ListModification(v, value_set, c)
                return v

            value_set = ValueSet(to_mod(v) for v in left_values)
        else:
            # Strip the trailing `=` to get the plain binary operator.
            operator = copy.copy(first_operator)
            operator.value = operator.value[:-1]
            for_stmt = tree.search_ancestor(stmt, 'for_stmt')
            if for_stmt is not None and for_stmt.type == 'for_stmt' and value_set \
                    and parser_utils.for_stmt_defines_one_name(for_stmt):
                # Iterate through result and add the values, that's possible
                # only in for loops without clutter, because they are
                # predictable. Also only do it, if the variable is not a tuple.
                node = for_stmt.get_testlist()
                cn = ContextualizedNode(context, node)
                ordered = list(cn.infer().iterate(cn))

                for lazy_value in ordered:
                    dct = {for_stmt.children[1].value: lazy_value.infer()}
                    with context.predefine_names(for_stmt, dct):
                        t = context.infer_node(rhs)
                        left_values = _infer_comparison(context, left_values, operator, t)
                value_set = left_values
            else:
                value_set = _infer_comparison(context, left_values, operator, value_set)
    debug.dbg('infer_expr_stmt result %s', value_set)
    return value_set

457 

458 

def infer_or_test(context, or_test):
    """
    Infer a left-associative chain of binary operations
    (``a <op> b <op> c ...``); the fallback for node types that
    ``_infer_node`` does not handle explicitly.
    """
    iterator = iter(or_test.children)
    types = context.infer_node(next(iterator))
    # Children alternate operand / operator / operand / ...
    for operator in iterator:
        right = next(iterator)
        if operator.type == 'comp_op':  # not in / is not
            operator = ' '.join(c.value for c in operator.children)

        # handle type inference of and/or here.
        if operator in ('and', 'or'):
            left_bools = set(left.py__bool__() for left in types)
            if left_bools == {True}:
                # Left side definitely truthy: `and` evaluates the right
                # side, `or` keeps the left.
                if operator == 'and':
                    types = context.infer_node(right)
            elif left_bools == {False}:
                # Left side definitely falsy: `or` evaluates the right side.
                if operator != 'and':
                    types = context.infer_node(right)
            # Otherwise continue, because of uncertainty.
        else:
            types = _infer_comparison(context, types, operator,
                                      context.infer_node(right))
    debug.dbg('infer_or_test types %s', types)
    return types

482 

483 

@iterator_to_value_set
def infer_factor(value_set, operator):
    """
    Calculates `+`, `-`, `~` and `not` prefixes.
    """
    for value in value_set:
        if operator == '-':
            # Only numbers can be negated; non-numbers are dropped entirely.
            if is_number(value):
                yield value.negate()
            continue
        if operator == 'not':
            b = value.py__bool__()
            if b is None:  # Uncertainty.
                yield list(value.inference_state.builtins_module.py__getattribute__('bool')
                           .execute_annotation()).pop()
            else:
                yield compiled.create_simple_object(value.inference_state, not b)
            continue
        # `+` and `~` (and anything else) leave the value untouched.
        yield value

502 

503 

def _literals_to_types(inference_state, result):
    """
    Replace literal values ('a', 1, 1.0, etc.) in ``result`` with generic
    instances of their respective types (str(), int(), float(), etc.).
    Non-literal values pass through unchanged.
    """
    converted = NO_VALUES
    for value in result:
        if is_literal(value):
            # Literals are only valid as long as the operations are
            # correct. Otherwise add a value-free instance.
            cls = compiled.builtin_from_name(inference_state, value.name.string_name)
            converted |= cls.execute_with_values()
        else:
            converted |= ValueSet([value])
    return converted

517 

518 

def _infer_comparison(context, left_values, operator, right_values):
    """
    Infer ``left <operator> right`` for whole value sets by dispatching the
    cartesian product of both sides to ``_infer_comparison_part``.
    """
    state = context.inference_state
    operator_str = operator if isinstance(operator, str) else str(operator.value)

    if not left_values or not right_values:
        # illegal slices e.g. cause left/right_result to be None
        result = (left_values or NO_VALUES) | (right_values or NO_VALUES)
        return _literals_to_types(state, result)

    if operator_str == "|" and all(
        value.is_class() or value.is_compiled()
        for value in itertools.chain(left_values, right_values)
    ):
        # ^^^ A naive hack for PEP 604
        return ValueSet.from_sets((left_values, right_values))

    # I don't think there's a reasonable chance that a string
    # operation is still correct, once we pass something like six
    # objects.
    if len(left_values) * len(right_values) > 6:
        return _literals_to_types(state, left_values | right_values)

    return ValueSet.from_sets(
        _infer_comparison_part(state, context, left, operator, right)
        for left in left_values
        for right in right_values
    )

547 

548 

def _is_annotation_name(name):
    """
    Return True when ``name`` lies inside an annotation: a param/return
    annotation of a function, or the annotated part of ``x: T = ...``.
    """
    ancestor = tree.search_ancestor(name, 'param', 'funcdef', 'expr_stmt')
    if ancestor is None:
        return False

    if ancestor.type in ('param', 'funcdef'):
        annotation_node = ancestor.annotation
        if annotation_node is not None:
            return annotation_node.start_pos <= name.start_pos < annotation_node.end_pos
    elif ancestor.type == 'expr_stmt':
        children = ancestor.children
        if len(children) > 1 and children[1].type == 'annassign':
            annassign = children[1]
            return annassign.start_pos <= name.start_pos < annassign.end_pos
    return False

563 

564 

def _is_list(value):
    # True when the value is an iterable whose array_type is 'list'.
    return value.array_type == 'list'

567 

568 

def _is_tuple(value):
    # True when the value is an iterable whose array_type is 'tuple'.
    return value.array_type == 'tuple'

571 

572 

def _bool_to_value(inference_state, bool_):
    # Turn a Python bool into the inference value for the builtin
    # True/False name (str(True) == 'True').
    return compiled.builtin_from_name(inference_state, str(bool_))

575 

576 

def _get_tuple_ints(value):
    """
    Return the plain ``int`` elements of a tuple/sequence literal, or
    ``None`` as soon as any element is not a literal integer (used for
    ``sys.version_info`` comparisons).
    """
    if not isinstance(value, iterable.SequenceLiteralValue):
        return None

    ints = []
    for lazy_value in value.py__iter__():
        if not isinstance(lazy_value, LazyTreeValue):
            return None
        node = lazy_value.data
        if node.type != 'number':
            return None
        try:
            ints.append(int(node.value))
        except ValueError:
            return None
    return ints

592 

593 

def _infer_comparison_part(inference_state, context, left, operator, right):
    """
    Infer the result of a single binary operation between two concrete
    values (``_infer_comparison`` handles whole value sets and dispatches
    here per pair).

    :param operator: Either a plain operator string or a parso operator
        leaf; ``str_operator`` normalizes it for lookups.
    """
    l_is_num = is_number(left)
    r_is_num = is_number(right)
    if isinstance(operator, str):
        str_operator = operator
    else:
        str_operator = str(operator.value)

    if str_operator == '*':
        # for iterables, ignore * operations
        if isinstance(left, iterable.Sequence) or is_string(left):
            return ValueSet([left])
        elif isinstance(right, iterable.Sequence) or is_string(right):
            return ValueSet([right])
    elif str_operator == '+':
        if l_is_num and r_is_num or is_string(left) and is_string(right):
            return left.execute_operation(right, str_operator)
        elif _is_list(left) and _is_list(right) or _is_tuple(left) and _is_tuple(right):
            return ValueSet([iterable.MergedArray(inference_state, (left, right))])
    elif str_operator == '-':
        if l_is_num and r_is_num:
            return left.execute_operation(right, str_operator)
    elif str_operator == '%':
        # With strings and numbers the left type typically remains. Except for
        # `int() % float()`.
        return ValueSet([left])
    elif str_operator in COMPARISON_OPERATORS:
        if left.is_compiled() and right.is_compiled():
            # Possible, because the return is not an option. Just compare.
            result = left.execute_operation(right, str_operator)
            if result:
                return result
        else:
            if str_operator in ('is', '!=', '==', 'is not'):
                operation = COMPARISON_OPERATORS[str_operator]
                bool_ = operation(left, right)
                # Only if == returns True or != returns False, we can continue.
                # There's no guarantee that they are not equal. This can help
                # in some cases, but does not cover everything.
                if (str_operator in ('is', '==')) == bool_:
                    return ValueSet([_bool_to_value(inference_state, bool_)])

            if isinstance(left, VersionInfo):
                version_info = _get_tuple_ints(right)
                if version_info is not None:
                    # Index with str_operator for consistency with the
                    # other lookups: `operator` may be a parso leaf rather
                    # than a plain string.
                    bool_result = compiled.access.COMPARISON_OPERATORS[str_operator](
                        inference_state.environment.version_info,
                        tuple(version_info)
                    )
                    return ValueSet([_bool_to_value(inference_state, bool_result)])

        # Comparison outcome unknown: could be either True or False.
        return ValueSet([
            _bool_to_value(inference_state, True),
            _bool_to_value(inference_state, False)
        ])
    elif str_operator in ('in', 'not in'):
        return inference_state.builtins_module.py__getattribute__('bool').execute_annotation()

    def check(obj):
        """Checks if a Jedi object is either a float or an int."""
        return isinstance(obj, TreeInstance) and \
            obj.name.string_name in ('int', 'float')

    # Static analysis, one is a number, the other one is not.
    if str_operator in ('+', '-') and l_is_num != r_is_num \
            and not (check(left) or check(right)):
        # Report the actual operator; previously '+' was hard-coded, which
        # produced a misleading message for '-' operations.
        message = "TypeError: unsupported operand type(s) for %s: %s and %s"
        analysis.add(context, 'type-error-operation', operator,
                     message % (str_operator, left, right))

    if left.is_class() or right.is_class():
        return NO_VALUES

    # Fall back to the magic method; try the reflected variant only when
    # the left operand has no matching method at all.
    method_name = operator_to_magic_method[str_operator]
    magic_methods = left.py__getattribute__(method_name)
    if magic_methods:
        result = magic_methods.execute_with_values(right)
        if result:
            return result

    if not magic_methods:
        reverse_method_name = reverse_operator_to_magic_method[str_operator]
        magic_methods = right.py__getattribute__(reverse_method_name)

        result = magic_methods.execute_with_values(left)
        if result:
            return result

    result = ValueSet([left, right])
    debug.dbg('Used operator %s resulting in %s', operator, result)
    return result

685 

686 

@plugin_manager.decorate()
def tree_name_to_values(inference_state, context, tree_name):
    """
    Infer the possible values of ``tree_name`` based on the kind of node
    that defines it (assignment, for loop, with statement, import, def,
    except clause, ...).
    """
    value_set = NO_VALUES
    module_node = context.get_root_context().tree_node
    # First check for annotations, like: `foo: int = 3`
    if module_node is not None:
        names = module_node.get_used_names().get(tree_name.value, [])
        found_annotation = False
        for name in names:
            expr_stmt = name.parent

            if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign":
                correct_scope = parser_utils.get_parent_scope(name) == context.tree_node
                ann_assign = expr_stmt.children[1]
                if correct_scope:
                    found_annotation = True
                    # Self-referencing annotation (`foo: foo`): resolve in
                    # the parent context to avoid an infinite loop.
                    if (
                        (ann_assign.children[1].type == 'name')
                        and (ann_assign.children[1].value == tree_name.value)
                        and context.parent_context
                    ):
                        context = context.parent_context
                    value_set |= annotation.infer_annotation(
                        context, expr_stmt.children[1].children[1]
                    ).execute_annotation()
        if found_annotation:
            return value_set

    types = []
    node = tree_name.get_definition(import_name_always=True, include_setitem=True)
    if node is None:
        node = tree_name.parent
        if node.type == 'global_stmt':
            c = context.create_context(tree_name)
            if c.is_module():
                # In case we are already part of the module, there is no point
                # in looking up the global statement anymore, because it's not
                # valid at that point anyway.
                return NO_VALUES
            # For global_stmt lookups, we only need the first possible scope,
            # which means the function itself.
            filter = next(c.get_filters())
            names = filter.get(tree_name.value)
            return ValueSet.from_sets(name.infer() for name in names)
        elif node.type not in ('import_from', 'import_name'):
            c = context.create_context(tree_name)
            return infer_atom(c, tree_name)

    typ = node.type
    # `# type: ...` comment hints win over inferred values.
    if typ == 'for_stmt':
        types = annotation.find_type_from_comment_hint_for(context, node, tree_name)
        if types:
            return types
    if typ == 'with_stmt':
        types = annotation.find_type_from_comment_hint_with(context, node, tree_name)
        if types:
            return types

    if typ in ('for_stmt', 'comp_for', 'sync_comp_for'):
        try:
            types = context.predefined_names[node][tree_name.value]
        except KeyError:
            # children[3] is the iterated expression of the for statement.
            cn = ContextualizedNode(context, node.children[3])
            for_types = iterate_values(
                cn.infer(),
                contextualized_node=cn,
                is_async=node.parent.type == 'async_stmt',
            )
            n = TreeNameDefinition(context, tree_name)
            types = check_tuple_assignments(n, for_types)
    elif typ == 'expr_stmt':
        types = infer_expr_stmt(context, node, tree_name)
    elif typ == 'with_stmt':
        value_managers = context.infer_node(node.get_test_node_from_name(tree_name))
        if node.parent.type == 'async_stmt':
            # In the case of `async with` statements, we need to
            # first get the coroutine from the `__aenter__` method,
            # then "unwrap" via the `__await__` method
            enter_methods = value_managers.py__getattribute__('__aenter__')
            coro = enter_methods.execute_with_values()
            return coro.py__await__().py__stop_iteration_returns()
        enter_methods = value_managers.py__getattribute__('__enter__')
        return enter_methods.execute_with_values()
    elif typ in ('import_from', 'import_name'):
        types = imports.infer_import(context, tree_name)
    elif typ in ('funcdef', 'classdef'):
        types = _apply_decorators(context, node)
    elif typ == 'try_stmt':
        # TODO an exception can also be a tuple. Check for those.
        # TODO check for types that are not classes and add it to
        # the static analysis report.
        exceptions = context.infer_node(tree_name.get_previous_sibling().get_previous_sibling())
        types = exceptions.execute_with_values()
    elif typ == 'param':
        types = NO_VALUES
    elif typ == 'del_stmt':
        types = NO_VALUES
    elif typ == 'namedexpr_test':
        types = infer_node(context, node)
    else:
        raise ValueError("Should not happen. type: %s" % typ)
    return types

789 

790 

# We don't want to have functions/classes that are created by the same
# tree_node.
@inference_state_method_cache()
def _apply_decorators(context, node):
    """
    Returns the function, that should to be executed in the end.
    This is also the places where the decorators are processed.
    """
    if node.type == 'classdef':
        decoratee_value = ClassValue(
            context.inference_state,
            parent_context=context,
            tree_node=node
        )
    else:
        decoratee_value = FunctionValue.from_context(context, node)
    initial = values = ValueSet([decoratee_value])

    if is_big_annoying_library(context):
        # Heuristic bail-out: applying decorators in huge libraries is too
        # expensive/unreliable; return the undecorated value.
        return values

    # Decorators are applied bottom-up, hence reversed().
    for dec in reversed(node.get_decorators()):
        debug.dbg('decorator: %s %s', dec, values, color="MAGENTA")
        with debug.increase_indent_cm():
            dec_values = context.infer_node(dec.children[1])
            trailer_nodes = dec.children[2:-1]
            if trailer_nodes:
                # Create a trailer and infer it.
                trailer = tree.PythonNode('trailer', trailer_nodes)
                trailer.parent = dec
                dec_values = infer_trailer(context, dec_values, trailer)

            if not len(dec_values):
                code = dec.get_code(include_prefix=False)
                # For the short future, we don't want to hear about the runtime
                # decorator in typing that was intentionally omitted. This is not
                # "correct", but helps with debugging.
                if code != '@runtime\n':
                    debug.warning('decorator not found: %s on %s', dec, node)
                return initial

            values = dec_values.execute(arguments.ValuesArguments([values]))
            if not len(values):
                debug.warning('not possible to resolve wrappers found %s', node)
                return initial

    debug.dbg('decorator end %s', values, color="MAGENTA")
    if values != initial:
        # Remember the original decoratee so e.g. signatures can still be
        # resolved through the wrapper.
        return ValueSet([Decoratee(c, decoratee_value) for c in values])
    return values

841 

842 

def check_tuple_assignments(name, value_set):
    """
    Checks if tuples are assigned.

    For a name on the left side of an unpacking assignment (e.g. ``a, b =
    ...``), iterate the right-hand side far enough to pick out the element
    at the name's index; nested unpacking repeats the process per level.
    """
    lazy_value = None
    for index, node in name.assignment_indexes():
        cn = ContextualizedNode(name.parent_context, node)
        iterated = value_set.iterate(cn)
        if isinstance(index, slice):
            # For no star unpacking is not possible.
            return NO_VALUES
        i = 0
        while i <= index:
            try:
                lazy_value = next(iterated)
            except StopIteration:
                # We could do this with the default param in next. But this
                # would allow this loop to run for a very long time if the
                # index number is high. Therefore break if the loop is
                # finished.
                return NO_VALUES
            else:
                # lazy_value.max is how many positions this element can
                # cover (it may stand for several values).
                i += lazy_value.max
        value_set = lazy_value.infer()
    return value_set

868 

869 

class ContextualizedSubscriptListNode(ContextualizedNode):
    """
    A contextualized node whose ``infer`` resolves the node as a subscript
    list; used by Dict/ListModification to track ``obj[key] = value`` keys.
    """
    def infer(self):
        return _infer_subscript_list(self.context, self.node)

873 

874 

def _infer_subscript_list(context, index):
    """
    Handles slices in subscript nodes.

    Returns a value set describing the index: a ``Slice`` for slice syntax,
    a sequence literal for ``a[x, y]`` subscript lists, or the inferred
    index expression itself.
    """
    if index == ':':
        # Like array[:]
        return ValueSet([iterable.Slice(context, None, None, None)])

    elif index.type == 'subscript' and not index.children[0] == '.':
        # subscript basically implies a slice operation
        # e.g. array[:3]
        # Collect up to three parts (start, stop, step); a leading ':'
        # means the start is omitted, a sliceop carries the step.
        result = []
        for el in index.children:
            if el == ':':
                if not result:
                    result.append(None)
            elif el.type == 'sliceop':
                if len(el.children) == 2:
                    result.append(el.children[1])
            else:
                result.append(el)
        # Pad the missing parts with None so Slice always gets three args.
        result += [None] * (3 - len(result))

        return ValueSet([iterable.Slice(context, *result)])
    elif index.type == 'subscriptlist':
        return ValueSet([iterable.SequenceLiteralValue(context.inference_state, context, index)])

    # No slices
    return context.infer_node(index)