Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/jedi-0.19.2-py3.11.egg/jedi/inference/syntax_tree.py: 11%

Shortcuts on this page

r m x   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

577 statements  

1""" 

2Functions inferring the syntax tree. 

3""" 

4import copy 

5import itertools 

6 

7from parso.python import tree 

8 

9from jedi import debug 

10from jedi import parser_utils 

11from jedi.inference.base_value import ValueSet, NO_VALUES, ContextualizedNode, \ 

12 iterator_to_value_set, iterate_values 

13from jedi.inference.lazy_value import LazyTreeValue 

14from jedi.inference import compiled 

15from jedi.inference import recursion 

16from jedi.inference import analysis 

17from jedi.inference import imports 

18from jedi.inference import arguments 

19from jedi.inference.value import ClassValue, FunctionValue 

20from jedi.inference.value import iterable 

21from jedi.inference.value.dynamic_arrays import ListModification, DictModification 

22from jedi.inference.value import TreeInstance 

23from jedi.inference.helpers import is_string, is_literal, is_number, \ 

24 get_names_of_node, is_big_annoying_library 

25from jedi.inference.compiled.access import COMPARISON_OPERATORS 

26from jedi.inference.cache import inference_state_method_cache 

27from jedi.inference.gradual.stub_value import VersionInfo 

28from jedi.inference.gradual import annotation 

29from jedi.inference.names import TreeNameDefinition 

30from jedi.inference.context import CompForContext 

31from jedi.inference.value.decorator import Decoratee 

32from jedi.plugins import plugin_manager 

33 

# Maps each binary Python operator to the dunder ("magic") method that
# implements it on the left-hand operand.
operator_to_magic_method = {
    '+': '__add__',
    '-': '__sub__',
    '*': '__mul__',
    '@': '__matmul__',
    '/': '__truediv__',
    '//': '__floordiv__',
    '%': '__mod__',
    '**': '__pow__',
    '<<': '__lshift__',
    '>>': '__rshift__',
    '&': '__and__',
    '|': '__or__',
    '^': '__xor__',
}

# The reflected variants (__radd__, __rsub__, ...), tried on the right-hand
# operand when the left-hand one does not provide the operation.
reverse_operator_to_magic_method = {
    k: '__r' + v[2:] for k, v in operator_to_magic_method.items()
}

53 

54 

55def _limit_value_infers(func): 

56 """ 

57 This is for now the way how we limit type inference going wild. There are 

58 other ways to ensure recursion limits as well. This is mostly necessary 

59 because of instance (self) access that can be quite tricky to limit. 

60 

61 I'm still not sure this is the way to go, but it looks okay for now and we 

62 can still go anther way in the future. Tests are there. ~ dave 

63 """ 

64 def wrapper(context, *args, **kwargs): 

65 n = context.tree_node 

66 inference_state = context.inference_state 

67 try: 

68 inference_state.inferred_element_counts[n] += 1 

69 maximum = 300 

70 if context.parent_context is None \ 

71 and context.get_value() is inference_state.builtins_module: 

72 # Builtins should have a more generous inference limit. 

73 # It is important that builtins can be executed, otherwise some 

74 # functions that depend on certain builtins features would be 

75 # broken, see e.g. GH #1432 

76 maximum *= 100 

77 

78 if inference_state.inferred_element_counts[n] > maximum: 

79 debug.warning('In value %s there were too many inferences.', n) 

80 return NO_VALUES 

81 except KeyError: 

82 inference_state.inferred_element_counts[n] = 1 

83 return func(context, *args, **kwargs) 

84 

85 return wrapper 

86 

87 

def infer_node(context, element):
    """
    Infer the values of ``element``, taking predefined names from an
    enclosing ``if``/``for`` statement into account (simple branch-aware
    flow analysis when running in analysis mode).
    """
    if isinstance(context, CompForContext):
        # Comprehension contexts get no branch analysis and no caching.
        return _infer_node(context, element)

    # Walk up to the closest enclosing if/for statement; give up at scope
    # borders (functions/classes/modules).
    if_stmt = element
    while if_stmt is not None:
        if_stmt = if_stmt.parent
        if if_stmt.type in ('if_stmt', 'for_stmt'):
            break
        if parser_utils.is_scope(if_stmt):
            if_stmt = None
            break
    predefined_if_name_dict = context.predefined_names.get(if_stmt)
    # TODO there's a lot of issues with this one. We actually should do
    # this in a different way. Caching should only be active in certain
    # cases and this all sucks.
    if predefined_if_name_dict is None and if_stmt \
            and if_stmt.type == 'if_stmt' and context.inference_state.is_analysis:
        if_stmt_test = if_stmt.children[1]
        name_dicts = [{}]
        # If we already did a check, we don't want to do it again -> If
        # value.predefined_names is filled, we stop.
        # We don't want to check the if stmt itself, it's just about
        # the content.
        if element.start_pos > if_stmt_test.end_pos:
            # Now we need to check if the names in the if_stmt match the
            # names in the suite.
            if_names = get_names_of_node(if_stmt_test)
            element_names = get_names_of_node(element)
            str_element_names = [e.value for e in element_names]
            if any(i.value in str_element_names for i in if_names):
                for if_name in if_names:
                    definitions = context.inference_state.infer(context, if_name)
                    # Every name that has multiple different definitions
                    # causes the complexity to rise. The complexity should
                    # never fall below 1.
                    if len(definitions) > 1:
                        if len(name_dicts) * len(definitions) > 16:
                            debug.dbg('Too many options for if branch inference %s.', if_stmt)
                            # There's only a certain amount of branches
                            # Jedi can infer, otherwise it will take to
                            # long.
                            name_dicts = [{}]
                            break

                        # Fork every existing name dict once per definition:
                        # a cartesian product of branch possibilities.
                        original_name_dicts = list(name_dicts)
                        name_dicts = []
                        for definition in definitions:
                            new_name_dicts = list(original_name_dicts)
                            for i, name_dict in enumerate(new_name_dicts):
                                new_name_dicts[i] = name_dict.copy()
                                new_name_dicts[i][if_name.value] = ValueSet([definition])

                            name_dicts += new_name_dicts
                    else:
                        for name_dict in name_dicts:
                            name_dict[if_name.value] = definitions
        if len(name_dicts) > 1:
            # Infer the element once per branch combination and union the
            # results.
            result = NO_VALUES
            for name_dict in name_dicts:
                with context.predefine_names(if_stmt, name_dict):
                    result |= _infer_node(context, element)
            return result
        else:
            return _infer_node_if_inferred(context, element)
    else:
        if predefined_if_name_dict:
            # Predefined names make caching unsafe; infer directly.
            return _infer_node(context, element)
        else:
            return _infer_node_if_inferred(context, element)

158 

159 

def _infer_node_if_inferred(context, element):
    """
    TODO This function is temporary: Merge with infer_node.

    Use the cached inference only when no ancestor of ``element`` carries
    predefined names; otherwise the result could depend on those names and
    must not be cached.
    """
    ancestor = element.parent
    while ancestor is not None:
        if context.predefined_names.get(ancestor) is not None:
            return _infer_node(context, element)
        ancestor = ancestor.parent
    return _infer_node_cached(context, element)

171 

172 

@inference_state_method_cache(default=NO_VALUES)
def _infer_node_cached(context, element):
    # Cached variant of _infer_node; only used when no predefined names can
    # influence the result (see _infer_node_if_inferred).
    return _infer_node(context, element)

176 

177 

@debug.increase_indent
@_limit_value_infers
def _infer_node(context, element):
    """Infer the values of one parse-tree node, dispatching on its type."""
    debug.dbg('infer_node %s@%s in %s', element, element.start_pos, context)
    inference_state = context.inference_state
    typ = element.type
    if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword', 'fstring'):
        return infer_atom(context, element)
    elif typ == 'lambdef':
        return ValueSet([FunctionValue.from_context(context, element)])
    elif typ == 'expr_stmt':
        return infer_expr_stmt(context, element)
    elif typ in ('power', 'atom_expr'):
        first_child = element.children[0]
        children = element.children[1:]
        had_await = False
        if first_child.type == 'keyword' and first_child.value == 'await':
            had_await = True
            first_child = children.pop(0)

        # Infer the base, then fold each trailer (call/getattr/getitem) in.
        value_set = context.infer_node(first_child)
        for (i, trailer) in enumerate(children):
            if trailer == '**':  # has a power operation.
                right = context.infer_node(children[i + 1])
                value_set = _infer_comparison(
                    context,
                    value_set,
                    trailer,
                    right
                )
                break
            value_set = infer_trailer(context, value_set, trailer)

        if had_await:
            return value_set.py__await__().py__stop_iteration_returns()
        return value_set
    elif typ in ('testlist_star_expr', 'testlist',):
        # The implicit tuple in statements.
        return ValueSet([iterable.SequenceLiteralValue(inference_state, context, element)])
    elif typ in ('not_test', 'factor'):
        # Unary prefixes (`not`, `-`, `+`, `~`) applied right-to-left.
        value_set = context.infer_node(element.children[-1])
        for operator in element.children[:-1]:
            value_set = infer_factor(value_set, operator)
        return value_set
    elif typ == 'test':
        # `x if foo else y` case: union of both branches.
        return (context.infer_node(element.children[0])
                | context.infer_node(element.children[-1]))
    elif typ == 'operator':
        # Must be an ellipsis, other operators are not inferred.
        if element.value != '...':
            origin = element.parent
            raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin))
        return ValueSet([compiled.builtin_from_name(inference_state, 'Ellipsis')])
    elif typ == 'dotted_name':
        # e.g. `a.b.c`: children alternate name/dot, so step by 2.
        value_set = infer_atom(context, element.children[0])
        for next_name in element.children[2::2]:
            value_set = value_set.py__getattribute__(next_name, name_context=context)
        return value_set
    elif typ == 'eval_input':
        return context.infer_node(element.children[0])
    elif typ == 'annassign':
        return annotation.infer_annotation(context, element.children[1]) \
            .execute_annotation()
    elif typ == 'yield_expr':
        if len(element.children) and element.children[1].type == 'yield_arg':
            # Implies that it's a yield from.
            element = element.children[1].children[1]
            generators = context.infer_node(element) \
                .py__getattribute__('__iter__').execute_with_values()
            return generators.py__stop_iteration_returns()

        # Generator.send() is not implemented.
        return NO_VALUES
    elif typ == 'namedexpr_test':
        # Walrus operator `name := expr`: value is the expression's value.
        return context.infer_node(element.children[2])
    elif typ == 'star_expr':
        return NO_VALUES
    else:
        return infer_or_test(context, element)

258 

259 

def infer_trailer(context, atom_values, trailer):
    """
    Apply one trailer node -- ``[...]``, ``.name`` or ``(...)`` -- to
    ``atom_values`` and return the resulting value set.
    """
    op, node = trailer.children[:2]
    if node == ')':  # `arglist` is optional.
        node = None

    if op == '[':
        op, node, _ = trailer.children
        return atom_values.get_item(
            _infer_subscript_list(context, node),
            ContextualizedNode(context, trailer)
        )

    debug.dbg('infer_trailer: %s in %s', trailer, atom_values)
    if op == '.':
        return atom_values.py__getattribute__(
            name_context=context,
            name_or_str=node
        )

    assert op == '(', 'trailer_op is actually %s' % op
    call_args = arguments.TreeArguments(context.inference_state, context, node, trailer)
    return atom_values.execute(call_args)

282 

283 

def infer_atom(context, atom):
    """
    Basically to process ``atom`` nodes. The parser sometimes doesn't
    generate the node (because it has just one child). In that case an atom
    might be a name or a literal as well.
    """
    state = context.inference_state
    if atom.type == 'name':
        # This is the first global lookup.
        stmt = tree.search_ancestor(atom, 'expr_stmt', 'lambdef', 'if_stmt') or atom
        if stmt.type == 'if_stmt':
            # Only anchor the search position at the if_stmt when the name is
            # inside one of its test expressions.
            if not any(n.start_pos <= atom.start_pos < n.end_pos for n in stmt.get_test_nodes()):
                stmt = atom
        elif stmt.type == 'lambdef':
            stmt = atom
        position = stmt.start_pos
        if _is_annotation_name(atom):
            # Since Python 3.7 (with from __future__ import annotations),
            # annotations are essentially strings and can reference objects
            # that are defined further down in code. Therefore just set the
            # position to None, so the finder will not try to stop at a certain
            # position in the module.
            position = None
        return context.py__getattribute__(atom, position=position)
    elif atom.type == 'keyword':
        # For False/True/None
        if atom.value in ('False', 'True', 'None'):
            return ValueSet([compiled.builtin_from_name(state, atom.value)])
        elif atom.value == 'yield':
            # Contrary to yield from, yield can just appear alone to return a
            # value when used with `.send()`.
            return NO_VALUES
        assert False, 'Cannot infer the keyword %s' % atom

    elif isinstance(atom, tree.Literal):
        string = state.compiled_subprocess.safe_literal_eval(atom.value)
        return ValueSet([compiled.create_simple_object(state, string)])
    elif atom.type == 'strings':
        # Will be multiple string.
        value_set = infer_atom(context, atom.children[0])
        for string in atom.children[1:]:
            right = infer_atom(context, string)
            # Adjacent string literals concatenate, i.e. behave like `+`.
            value_set = _infer_comparison(context, value_set, '+', right)
        return value_set
    elif atom.type == 'fstring':
        return compiled.get_string_value_set(state)
    else:
        c = atom.children
        # Parentheses without commas are not tuples.
        if c[0] == '(' and not len(c) == 2 \
                and not (c[1].type == 'testlist_comp'
                         and len(c[1].children) > 1):
            return context.infer_node(c[1])

        try:
            comp_for = c[1].children[1]
        except (IndexError, AttributeError):
            pass
        else:
            if comp_for == ':':
                # Dict comprehensions have a colon at the 3rd index.
                try:
                    comp_for = c[1].children[3]
                except IndexError:
                    pass

            if comp_for.type in ('comp_for', 'sync_comp_for'):
                return ValueSet([iterable.comprehension_from_atom(
                    state, context, atom
                )])

        # It's a dict/list/tuple literal.
        array_node = c[1]
        try:
            array_node_c = array_node.children
        except AttributeError:
            array_node_c = []
        # `{}`/`{**x}`/`{k: v}` are dicts; everything else in braces is a
        # set, and brackets/parens are sequences.
        if c[0] == '{' and (array_node == '}' or ':' in array_node_c
                            or '**' in array_node_c):
            new_value = iterable.DictLiteralValue(state, context, atom)
        else:
            new_value = iterable.SequenceLiteralValue(state, context, atom)
        return ValueSet([new_value])

367 

368 

@_limit_value_infers
def infer_expr_stmt(context, stmt, seek_name=None):
    """
    Infer an ``expr_stmt`` while guarding against runaway recursion.

    When ``seek_name`` is given, a ``# type:`` comment hint on the
    assignment takes precedence over inferring the right-hand side.
    Returns NO_VALUES when the recursion guard refuses execution.
    """
    with recursion.execution_allowed(context.inference_state, stmt) as allowed:
        if allowed:
            if seek_name is not None:
                pep0484_values = \
                    annotation.find_type_from_comment_hint_assign(context, stmt, seek_name)
                if pep0484_values:
                    return pep0484_values

            return _infer_expr_stmt(context, stmt, seek_name)
    return NO_VALUES

381 

382 

@debug.increase_indent
def _infer_expr_stmt(context, stmt, seek_name=None):
    """
    The starting point of the completion. A statement always owns a call
    list, which are the calls, that a statement does. In case multiple
    names are defined in the statement, `seek_name` returns the result for
    this name.

    expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
                     ('=' (yield_expr|testlist_star_expr))*)
    annassign: ':' test ['=' test]
    augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
                '<<=' | '>>=' | '**=' | '//=')

    :param stmt: A `tree.ExprStmt`.
    """
    def check_setitem(stmt):
        # Detect `name[subscript] = ...`; returns (is_setitem, subscript node).
        atom_expr = stmt.children[0]
        if atom_expr.type not in ('atom_expr', 'power'):
            return False, None
        name = atom_expr.children[0]
        if name.type != 'name' or len(atom_expr.children) != 2:
            return False, None
        trailer = atom_expr.children[-1]
        return trailer.children[0] == '[', trailer.children[1]

    debug.dbg('infer_expr_stmt %s (%s)', stmt, seek_name)
    rhs = stmt.get_rhs()

    value_set = context.infer_node(rhs)

    if seek_name:
        n = TreeNameDefinition(context, seek_name)
        value_set = check_tuple_assignments(n, value_set)

    first_operator = next(stmt.yield_operators(), None)
    is_setitem, subscriptlist = check_setitem(stmt)
    # NOTE(review): despite the name, this detects *augmented* assignments
    # (`+=`, `-=`, ...): any operator leaf that is neither a plain `=` nor
    # absent.
    is_annassign = first_operator not in ('=', None) and first_operator.type == 'operator'
    if is_annassign or is_setitem:
        # `=` is always the last character in aug assignments -> -1
        name = stmt.get_defined_names(include_setitem=True)[0].value
        left_values = context.py__getattribute__(name, position=stmt.start_pos)

        if is_setitem:
            def to_mod(v):
                # Wrap mutated containers so later reads see the new items.
                c = ContextualizedSubscriptListNode(context, subscriptlist)
                if v.array_type == 'dict':
                    return DictModification(v, value_set, c)
                elif v.array_type == 'list':
                    return ListModification(v, value_set, c)
                return v

            value_set = ValueSet(to_mod(v) for v in left_values)
        else:
            # Strip the trailing `=` from e.g. `+=` to get the base operator.
            operator = copy.copy(first_operator)
            operator.value = operator.value[:-1]
            for_stmt = tree.search_ancestor(stmt, 'for_stmt')
            if for_stmt is not None and for_stmt.type == 'for_stmt' and value_set \
                    and parser_utils.for_stmt_defines_one_name(for_stmt):
                # Iterate through result and add the values, that's possible
                # only in for loops without clutter, because they are
                # predictable. Also only do it, if the variable is not a tuple.
                node = for_stmt.get_testlist()
                cn = ContextualizedNode(context, node)
                ordered = list(cn.infer().iterate(cn))

                for lazy_value in ordered:
                    dct = {for_stmt.children[1].value: lazy_value.infer()}
                    with context.predefine_names(for_stmt, dct):
                        t = context.infer_node(rhs)
                        left_values = _infer_comparison(context, left_values, operator, t)
                value_set = left_values
            else:
                value_set = _infer_comparison(context, left_values, operator, value_set)
    debug.dbg('infer_expr_stmt result %s', value_set)
    return value_set

459 

460 

def infer_or_test(context, or_test):
    """
    Infer boolean/comparison chains like ``a or b`` or ``a < b < c``.
    """
    children = iter(or_test.children)
    types = context.infer_node(next(children))
    for operator in children:
        right = next(children)
        if operator.type == 'comp_op':  # not in / is not
            operator = ' '.join(c.value for c in operator.children)

        # Handle type inference of and/or here.
        if operator in ('and', 'or'):
            # Short-circuit semantics: take the right side only when the
            # truthiness of every left value forces its evaluation.
            left_bools = {left.py__bool__() for left in types}
            if left_bools == {True} and operator == 'and':
                types = context.infer_node(right)
            elif left_bools == {False} and operator != 'and':
                types = context.infer_node(right)
            # Anything else is uncertain; keep the left-hand types.
        else:
            right_types = context.infer_node(right)
            types = _infer_comparison(context, types, operator, right_types)
    debug.dbg('infer_or_test types %s', types)
    return types

484 

485 

@iterator_to_value_set
def infer_factor(value_set, operator):
    """
    Calculates `+`, `-`, `~` and `not` prefixes.
    """
    for value in value_set:
        if operator == '-':
            # Only numbers can be negated; other values are dropped.
            if is_number(value):
                yield value.negate()
            continue
        if operator == 'not':
            truthiness = value.py__bool__()
            if truthiness is None:  # Uncertainty.
                bool_cls = value.inference_state.builtins_module \
                    .py__getattribute__('bool')
                yield list(bool_cls.execute_annotation()).pop()
            else:
                yield compiled.create_simple_object(value.inference_state, not truthiness)
            continue
        # `+`, `~` and anything else leave the value untouched.
        yield value

504 

505 

def _literals_to_types(inference_state, result):
    """
    Replace literal values ('a', 1, 1.0, ...) with plain instances of their
    type (str(), int(), float(), ...); non-literals pass through unchanged.
    """
    converted = NO_VALUES
    for value in result:
        if not is_literal(value):
            converted |= ValueSet([value])
            continue
        # Literals are only valid as long as the operations are
        # correct. Otherwise add a value-free instance.
        builtin_cls = compiled.builtin_from_name(inference_state, value.name.string_name)
        converted |= builtin_cls.execute_with_values()
    return converted

519 

520 

def _infer_comparison(context, left_values, operator, right_values):
    """
    Infer a binary operation between two value sets, limiting the pairwise
    explosion for large sets.
    """
    state = context.inference_state
    operator_str = operator if isinstance(operator, str) else str(operator.value)

    if not left_values or not right_values:
        # illegal slices e.g. cause left/right_result to be None
        combined = (left_values or NO_VALUES) | (right_values or NO_VALUES)
        return _literals_to_types(state, combined)

    if operator_str == "|" and all(
        value.is_class() or value.is_compiled()
        for value in itertools.chain(left_values, right_values)
    ):
        # A naive hack for PEP 604 union annotations like `int | str`.
        return ValueSet.from_sets((left_values, right_values))

    # I don't think there's a reasonable chance that a string
    # operation is still correct, once we pass something like six
    # objects.
    if len(left_values) * len(right_values) > 6:
        return _literals_to_types(state, left_values | right_values)

    return ValueSet.from_sets(
        _infer_comparison_part(state, context, left, operator, right)
        for left in left_values
        for right in right_values
    )

549 

550 

def _is_annotation_name(name):
    """Return True when ``name`` is located inside a type annotation."""
    ancestor = tree.search_ancestor(name, 'param', 'funcdef', 'expr_stmt')
    if ancestor is None:
        return False

    annotation_node = None
    if ancestor.type in ('param', 'funcdef'):
        annotation_node = ancestor.annotation
    elif ancestor.type == 'expr_stmt':
        children = ancestor.children
        if len(children) > 1 and children[1].type == 'annassign':
            annotation_node = children[1]

    if annotation_node is None:
        return False
    # The name must lie within the annotation's source span.
    return annotation_node.start_pos <= name.start_pos < annotation_node.end_pos

565 

566 

567def _is_list(value): 

568 return value.array_type == 'list' 

569 

570 

571def _is_tuple(value): 

572 return value.array_type == 'tuple' 

573 

574 

def _bool_to_value(inference_state, bool_):
    # Map a Python bool to the corresponding builtin `True`/`False` value.
    return compiled.builtin_from_name(inference_state, str(bool_))

577 

578 

def _get_tuple_ints(value):
    """
    Return the list of ints in a sequence literal like ``(3, 7)``, or None
    when ``value`` is not a sequence literal made up purely of int literals.
    """
    if not isinstance(value, iterable.SequenceLiteralValue):
        return None
    numbers = []
    for lazy_value in value.py__iter__():
        if not isinstance(lazy_value, LazyTreeValue):
            return None
        node = lazy_value.data
        if node.type != 'number':
            return None
        try:
            numbers.append(int(node.value))
        except ValueError:
            # `number` nodes include float/complex literals, which int()
            # rejects.
            return None
    return numbers

594 

595 

def _infer_comparison_part(inference_state, context, left, operator, right):
    """
    Infer the result of ``left <operator> right`` for one pair of values.

    ``operator`` may be a plain string or a parso operator leaf; it is
    normalized to ``str_operator`` before any dictionary lookups.
    Falls back to the magic-method protocol (``__add__``/``__radd__`` etc.)
    and, as a last resort, returns the union of both operands.
    """
    l_is_num = is_number(left)
    r_is_num = is_number(right)
    if isinstance(operator, str):
        str_operator = operator
    else:
        str_operator = str(operator.value)

    if str_operator == '*':
        # for iterables, ignore * operations
        if isinstance(left, iterable.Sequence) or is_string(left):
            return ValueSet([left])
        elif isinstance(right, iterable.Sequence) or is_string(right):
            return ValueSet([right])
    elif str_operator == '+':
        if l_is_num and r_is_num or is_string(left) and is_string(right):
            return left.execute_operation(right, str_operator)
        elif _is_list(left) and _is_list(right) or _is_tuple(left) and _is_tuple(right):
            return ValueSet([iterable.MergedArray(inference_state, (left, right))])
    elif str_operator == '-':
        if l_is_num and r_is_num:
            return left.execute_operation(right, str_operator)
    elif str_operator == '%':
        # With strings and numbers the left type typically remains. Except for
        # `int() % float()`.
        return ValueSet([left])
    elif str_operator in COMPARISON_OPERATORS:
        if left.is_compiled() and right.is_compiled():
            # Possible, because the return is not an option. Just compare.
            result = left.execute_operation(right, str_operator)
            if result:
                return result
        else:
            if str_operator in ('is', '!=', '==', 'is not'):
                operation = COMPARISON_OPERATORS[str_operator]
                bool_ = operation(left, right)
                # Only if == returns True or != returns False, we can continue.
                # There's no guarantee that they are not equal. This can help
                # in some cases, but does not cover everything.
                if (str_operator in ('is', '==')) == bool_:
                    return ValueSet([_bool_to_value(inference_state, bool_)])

            if isinstance(left, VersionInfo):
                version_info = _get_tuple_ints(right)
                if version_info is not None:
                    # Fix: use the normalized string operator as the dict
                    # key; `operator` may be a parso node, which is never a
                    # valid COMPARISON_OPERATORS key.
                    bool_result = compiled.access.COMPARISON_OPERATORS[str_operator](
                        inference_state.environment.version_info,
                        tuple(version_info)
                    )
                    return ValueSet([_bool_to_value(inference_state, bool_result)])

        # Comparisons are uncertain -> either outcome is possible.
        return ValueSet([
            _bool_to_value(inference_state, True),
            _bool_to_value(inference_state, False)
        ])
    elif str_operator in ('in', 'not in'):
        return inference_state.builtins_module.py__getattribute__('bool').execute_annotation()

    def check(obj):
        """Checks if a Jedi object is either a float or an int."""
        return isinstance(obj, TreeInstance) and \
            obj.name.string_name in ('int', 'float')

    # Static analysis, one is a number, the other one is not.
    if str_operator in ('+', '-') and l_is_num != r_is_num \
            and not (check(left) or check(right)):
        # Fix: report the actual operator instead of a hard-coded '+'.
        message = "TypeError: unsupported operand type(s) for %s: %s and %s"
        analysis.add(context, 'type-error-operation', operator,
                     message % (str_operator, left, right))

    if left.is_class() or right.is_class():
        return NO_VALUES

    method_name = operator_to_magic_method[str_operator]
    magic_methods = left.py__getattribute__(method_name)
    if magic_methods:
        result = magic_methods.execute_with_values(right)
        if result:
            return result

    if not magic_methods:
        # Try the reflected operation on the right operand.
        reverse_method_name = reverse_operator_to_magic_method[str_operator]
        magic_methods = right.py__getattribute__(reverse_method_name)

        result = magic_methods.execute_with_values(left)
        if result:
            return result

    result = ValueSet([left, right])
    debug.dbg('Used operator %s resulting in %s', operator, result)
    return result

687 

688 

@plugin_manager.decorate()
def tree_name_to_values(inference_state, context, tree_name):
    """
    Resolve a tree name to its possible values, dispatching on the kind of
    node that defines it (annotation, assignment, loop, with, import, ...).
    """
    value_set = NO_VALUES
    module_node = context.get_root_context().tree_node
    # First check for annotations, like: `foo: int = 3`
    if module_node is not None:
        names = module_node.get_used_names().get(tree_name.value, [])
        found_annotation = False
        for name in names:
            expr_stmt = name.parent

            if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign":
                correct_scope = parser_utils.get_parent_scope(name) == context.tree_node
                ann_assign = expr_stmt.children[1]
                if correct_scope:
                    found_annotation = True
                    if (
                        (ann_assign.children[1].type == 'name')
                        and (ann_assign.children[1].value == tree_name.value)
                        and context.parent_context
                    ):
                        # The annotation is the name itself (e.g. `foo: foo`);
                        # resolve it in the parent context to avoid
                        # self-reference.
                        context = context.parent_context
                    value_set |= annotation.infer_annotation(
                        context, expr_stmt.children[1].children[1]
                    ).execute_annotation()
        if found_annotation:
            return value_set

    types = []
    node = tree_name.get_definition(import_name_always=True, include_setitem=True)
    if node is None:
        node = tree_name.parent
        if node.type == 'global_stmt':
            c = context.create_context(tree_name)
            if c.is_module():
                # In case we are already part of the module, there is no point
                # in looking up the global statement anymore, because it's not
                # valid at that point anyway.
                return NO_VALUES
            # For global_stmt lookups, we only need the first possible scope,
            # which means the function itself.
            filter = next(c.get_filters())
            names = filter.get(tree_name.value)
            return ValueSet.from_sets(name.infer() for name in names)
        elif node.type not in ('import_from', 'import_name'):
            c = context.create_context(tree_name)
            return infer_atom(c, tree_name)

    typ = node.type
    if typ == 'for_stmt':
        # `# type:` comment hints win over iterating the loop source.
        types = annotation.find_type_from_comment_hint_for(context, node, tree_name)
        if types:
            return types
    if typ == 'with_stmt':
        types = annotation.find_type_from_comment_hint_with(context, node, tree_name)
        if types:
            return types

    if typ in ('for_stmt', 'comp_for', 'sync_comp_for'):
        try:
            types = context.predefined_names[node][tree_name.value]
        except KeyError:
            # children[3] is the iterated expression of the for/comp-for.
            cn = ContextualizedNode(context, node.children[3])
            for_types = iterate_values(
                cn.infer(),
                contextualized_node=cn,
                is_async=node.parent.type == 'async_stmt',
            )
            n = TreeNameDefinition(context, tree_name)
            types = check_tuple_assignments(n, for_types)
    elif typ == 'expr_stmt':
        types = infer_expr_stmt(context, node, tree_name)
    elif typ == 'with_stmt':
        value_managers = context.infer_node(node.get_test_node_from_name(tree_name))
        if node.parent.type == 'async_stmt':
            # In the case of `async with` statements, we need to
            # first get the coroutine from the `__aenter__` method,
            # then "unwrap" via the `__await__` method
            enter_methods = value_managers.py__getattribute__('__aenter__')
            coro = enter_methods.execute_with_values()
            return coro.py__await__().py__stop_iteration_returns()
        enter_methods = value_managers.py__getattribute__('__enter__')
        return enter_methods.execute_with_values()
    elif typ in ('import_from', 'import_name'):
        types = imports.infer_import(context, tree_name)
    elif typ in ('funcdef', 'classdef'):
        types = _apply_decorators(context, node)
    elif typ == 'try_stmt':
        # TODO an exception can also be a tuple. Check for those.
        # TODO check for types that are not classes and add it to
        # the static analysis report.
        exceptions = context.infer_node(tree_name.get_previous_sibling().get_previous_sibling())
        types = exceptions.execute_with_values()
    elif typ == 'param':
        types = NO_VALUES
    elif typ == 'del_stmt':
        types = NO_VALUES
    elif typ == 'namedexpr_test':
        types = infer_node(context, node)
    else:
        raise ValueError("Should not happen. type: %s" % typ)
    return types

791 

792 

# We don't want to have functions/classes that are created by the same
# tree_node.
@inference_state_method_cache()
def _apply_decorators(context, node):
    """
    Returns the function, that should to be executed in the end.
    This is also the places where the decorators are processed.
    """
    if node.type == 'classdef':
        decoratee_value = ClassValue(
            context.inference_state,
            parent_context=context,
            tree_node=node
        )
    else:
        decoratee_value = FunctionValue.from_context(context, node)
    initial = values = ValueSet([decoratee_value])

    if is_big_annoying_library(context):
        # Inferring decorators in huge libraries is skipped entirely.
        return values

    # Decorators apply bottom-up, so process them in reverse source order.
    for dec in reversed(node.get_decorators()):
        debug.dbg('decorator: %s %s', dec, values, color="MAGENTA")
        with debug.increase_indent_cm():
            dec_values = context.infer_node(dec.children[1])
            trailer_nodes = dec.children[2:-1]
            if trailer_nodes:
                # Create a trailer and infer it.
                trailer = tree.PythonNode('trailer', trailer_nodes)
                trailer.parent = dec
                dec_values = infer_trailer(context, dec_values, trailer)

            if not len(dec_values):
                code = dec.get_code(include_prefix=False)
                # For the short future, we don't want to hear about the runtime
                # decorator in typing that was intentionally omitted. This is not
                # "correct", but helps with debugging.
                if code != '@runtime\n':
                    debug.warning('decorator not found: %s on %s', dec, node)
                # Fall back to the undecorated value.
                return initial

            values = dec_values.execute(arguments.ValuesArguments([values]))
            if not len(values):
                debug.warning('not possible to resolve wrappers found %s', node)
                return initial

        debug.dbg('decorator end %s', values, color="MAGENTA")
    if values != initial:
        return ValueSet([Decoratee(c, decoratee_value) for c in values])
    return values

843 

844 

def check_tuple_assignments(name, value_set):
    """
    Checks if tuples are assigned.

    For a name inside an unpacking target (e.g. ``a, b = x``), walk to the
    element of ``value_set`` that corresponds to the name's index; nested
    targets apply this repeatedly.
    """
    lazy_value = None
    for index, node in name.assignment_indexes():
        cn = ContextualizedNode(name.parent_context, node)
        iterated = value_set.iterate(cn)
        if isinstance(index, slice):
            # For no star unpacking is not possible.
            return NO_VALUES
        i = 0
        while i <= index:
            try:
                lazy_value = next(iterated)
            except StopIteration:
                # We could do this with the default param in next. But this
                # would allow this loop to run for a very long time if the
                # index number is high. Therefore break if the loop is
                # finished.
                return NO_VALUES
            else:
                # lazy_value.max accounts for elements that may represent
                # more than one position.
                i += lazy_value.max
        value_set = lazy_value.infer()
    return value_set

870 

871 

class ContextualizedSubscriptListNode(ContextualizedNode):
    # A contextualized subscript node whose inference goes through
    # _infer_subscript_list, so slice syntax is handled correctly.
    def infer(self):
        return _infer_subscript_list(self.context, self.node)

875 

876 

def _infer_subscript_list(context, index):
    """
    Handles slices in subscript nodes.
    """
    if index == ':':
        # A bare colon, like `array[:]`.
        return ValueSet([iterable.Slice(context, None, None, None)])

    if index.type == 'subscript' and not index.children[0] == '.':
        # A `subscript` node basically implies a slice operation,
        # e.g. `array[:3]`.
        parts = []
        for child in index.children:
            if child == ':':
                # Only a leading colon marks an omitted start value.
                if not parts:
                    parts.append(None)
            elif child.type == 'sliceop':
                # `sliceop` is `: [step]`; an absent step is padded below.
                if len(child.children) == 2:
                    parts.append(child.children[1])
            else:
                parts.append(child)
        # Pad to exactly (start, stop, step).
        parts += [None] * (3 - len(parts))

        return ValueSet([iterable.Slice(context, *parts)])

    if index.type == 'subscriptlist':
        # Multiple subscripts, e.g. `x[a, b]`, behave like an implicit tuple.
        return ValueSet([iterable.SequenceLiteralValue(context.inference_state, context, index)])

    # No slices -> just infer the index expression itself.
    return context.infer_node(index)