diff --git a/misc/diff-cache.py b/misc/diff-cache.py index 1f1b2a39c4bb3..07a2f416a2708 100644 --- a/misc/diff-cache.py +++ b/misc/diff-cache.py @@ -18,7 +18,7 @@ from librt import base64 from librt.internal import ReadBuffer, WriteBuffer -from mypy.cache import CacheMeta +from mypy.cache import CacheMeta, CacheMetaEx from mypy.metastore import FilesystemMetadataStore, MetadataStore, SqliteMetadataStore from mypy.util import json_dumps, json_loads @@ -69,6 +69,7 @@ def normalize_meta(meta: CacheMeta) -> None: Zero out mtimes and sort dependencies deterministically. """ + # TODO: handle dep_hashes here and in relevant parts below. meta.mtime = 0 meta.data_mtime = 0 meta.dependencies, meta.suppressed, meta.dep_prios, meta.dep_lines = sort_deps( @@ -115,7 +116,18 @@ def load(cache: MetadataStore, s: str) -> Any: return data normalize_meta(meta) return serialize_meta_ff(meta, version_prefix) - if s.endswith((".data.ff", ".err.ff")): + if s.endswith(".meta_ex.ff"): + buf = ReadBuffer(data) + meta = CacheMetaEx.read(buf) + if meta is None: + # Can't deserialize. 
Fall back to raw bytes as above + return data + meta.dependencies.sort() + meta.suppressed.sort() + outbuf = WriteBuffer() + meta.write(outbuf) + return outbuf.getvalue() + if s.endswith(".data.ff"): return data obj = json_loads(data) if s.endswith(".meta.json"): diff --git a/mypy/build.py b/mypy/build.py index f079d3a636630..423c57743958e 100644 --- a/mypy/build.py +++ b/mypy/build.py @@ -63,13 +63,13 @@ LIST_GEN, LITERAL_NONE, CacheMeta, + CacheMetaEx, ErrorTuple, JsonValue, ReadBuffer, Tag, WriteBuffer, read_bytes, - read_errors, read_int, read_int_list, read_int_opt, @@ -77,7 +77,6 @@ read_str_list, read_str_opt, write_bytes, - write_errors, write_int, write_int_list, write_int_opt, @@ -86,7 +85,7 @@ write_str_list, write_str_opt, ) -from mypy.checker import TypeChecker +from mypy.checker import DeferredNode, TypeChecker from mypy.defaults import ( WORKER_CONNECTION_TIMEOUT, WORKER_DONE_TIMEOUT, @@ -110,12 +109,15 @@ ) from mypy.messages import MessageBuilder from mypy.nodes import ( + Decorator, FileRawData, + FuncDef, Import, ImportAll, ImportBase, ImportFrom, MypyFile, + OverloadedFuncDef, SymbolTable, ) from mypy.options import OPTIONS_AFFECTING_CACHE_NO_PLATFORM @@ -1189,15 +1191,13 @@ def submit_to_workers(self, graph: Graph, sccs: list[SCC] | None = None) -> None ), ) - def wait_for_done( - self, graph: Graph - ) -> tuple[list[SCC], bool, dict[str, tuple[str, list[str]]]]: + def wait_for_done(self, graph: Graph) -> tuple[list[SCC], bool, dict[str, ModuleResult]]: """Wait for a stale SCC processing to finish. Return a tuple three items: * processed SCCs * whether we have more in the queue - * new interface hash and list of errors for each module + * new interface hash or list of errors for each module The last item is only used for parallel processing. 
""" if self.workers: @@ -1210,7 +1210,7 @@ def wait_for_done( def wait_for_done_workers( self, graph: Graph - ) -> tuple[list[SCC], bool, dict[str, tuple[str, list[str]]]]: + ) -> tuple[list[SCC], bool, dict[str, ModuleResult]]: if not self.scc_queue and len(self.free_workers) == len(self.workers): return [], False, {} @@ -1218,15 +1218,21 @@ def wait_for_done_workers( results = {} for idx in ready_to_read([w.conn for w in self.workers], WORKER_DONE_TIMEOUT): data = SccResponseMessage.read(receive(self.workers[idx].conn)) - self.free_workers.add(idx) + if not data.is_interface: + # Mark worker as free after it finished checking implementation. + self.free_workers.add(idx) scc_id = data.scc_id if data.blocker is not None: raise data.blocker assert data.result is not None results.update(data.result) - done_sccs.append(self.scc_by_id[scc_id]) + if data.is_interface: + send(self.workers[idx].conn, AckMessage()) + done_sccs.append(self.scc_by_id[scc_id]) self.submit_to_workers(graph) # advance after some workers are free. return ( + # Note that "done" means interface-ready in this context. This is what + # the caller should expect. done_sccs, bool(self.scc_queue) or len(self.free_workers) < len(self.workers), results, @@ -1628,10 +1634,10 @@ def create_metastore(options: Options, parallel_worker: bool = False) -> Metadat return mds -def get_errors_name(meta_name: str) -> str: - # Convert e.g. foo.bar.meta.ff to foo.bar.err.ff +def get_meta_ex_name(meta_name: str) -> str: + # Convert e.g. foo.bar.meta.ff to foo.bar.meta_ex.ff parts = meta_name.rsplit(".", maxsplit=2) - parts[1] = "err" + parts[1] = "meta_ex" return ".".join(parts) @@ -1697,7 +1703,7 @@ def options_snapshot(id: str, manager: BuildManager) -> dict[str, object]: def find_cache_meta( id: str, path: str, manager: BuildManager, skip_validation: bool = False -) -> tuple[CacheMeta | None, list[ErrorTuple]]: +) -> tuple[CacheMeta, CacheMetaEx] | None: """Find cache data for a module. 
Args: @@ -1707,7 +1713,7 @@ def find_cache_meta( skip_validation: if True skip any validation steps (used for parallel checking) Returns: - A CacheMeta instance if the cache data was found and appears + A CacheMeta/CacheMetaEx instance pair if the cache data was found and appears valid; otherwise None. """ # TODO: May need to take more build options into account @@ -1728,7 +1734,7 @@ def find_cache_meta( log_error=f"Could not load cache for {id}: ", ) if meta is None: - return None, [] + return None if manager.stats_enabled: t1 = time.time() if isinstance(meta, bytes): @@ -1737,31 +1743,35 @@ def find_cache_meta( # TODO: switch to something like librt.internal.read_byte() if this is slow. if meta[0] != cache_version() or meta[1] != CACHE_VERSION: manager.log(f"Metadata abandoned for {id}: incompatible cache format") - return None, [] + return None data_io = ReadBuffer(meta[2:]) m = CacheMeta.read(data_io, data_file) else: m = CacheMeta.deserialize(meta, data_file) if m is None: manager.log(f"Metadata abandoned for {id}: cannot deserialize data") - return None, [] + return None if manager.stats_enabled: t2 = time.time() manager.add_stats( load_meta_time=t2 - t0, load_meta_load_time=t1 - t0, load_meta_from_dict_time=t2 - t1 ) if skip_validation: - return m, [] + # If the caller requested no validation, skip the implementation part of the meta + # as well, as a performance optimization. Note: this may return an incomplete meta, + # only use if you know what you are doing. + assert manager.parallel_worker + return m, CacheMetaEx([], [], [], []) # Ignore cache if generated by an older mypy version. 
if m.version_id != manager.version_id and not manager.options.skip_version_check: manager.log(f"Metadata abandoned for {id}: different mypy version") - return None, [] + return None total_deps = len(m.dependencies) + len(m.suppressed) if len(m.dep_prios) != total_deps or len(m.dep_lines) != total_deps: manager.log(f"Metadata abandoned for {id}: broken dependencies") - return None, [] + return None # Ignore cache if (relevant) options aren't the same. # Note that it's fine to mutilate cached_options since it's only used here. @@ -1783,12 +1793,12 @@ def find_cache_meta( key, cached_options.get(key), current_options.get(key) ) ) - return None, [] + return None if manager.old_plugins_snapshot and manager.plugins_snapshot: # Check if plugins are still the same. if manager.plugins_snapshot != manager.old_plugins_snapshot: manager.log(f"Metadata abandoned for {id}: plugins differ") - return None, [] + return None plugin_data = manager.plugin.report_config_data(ReportConfigContext(id, path, is_check=True)) if not manager.options.fixed_format_cache: # So that plugins can return data with tuples in it without @@ -1797,31 +1807,31 @@ def find_cache_meta( plugin_data = json_loads(json_dumps(plugin_data)) if m.plugin_data != plugin_data: manager.log(f"Metadata abandoned for {id}: plugin configuration differs") - return None, [] + return None - # Load cached errors for this file, even if empty. This is needed to avoid - # invalid cache state after a crash/blocker/Ctrl+C etc. 
- errors_file = get_errors_name(meta_file) + meta_ex_file = get_meta_ex_name(meta_file) if manager.options.fixed_format_cache: - errors = _load_ff_file( - errors_file, manager, log_error_fmt="Could not load errors for {}: ", id=id + meta_ex = _load_ff_file( + meta_ex_file, manager, log_error_fmt="Could not load meta_ex for {}: ", id=id ) else: - errors = _load_json_file( - errors_file, + meta_ex = _load_json_file( + meta_ex_file, manager, - log_success=f"Errors {id} ", - log_error=f"Could not load errors for {id}: ", + log_success=f"Meta_ex {id} ", + log_error=f"Could not load meta_ex for {id}: ", ) - if errors is None: - return None, [] - if isinstance(errors, bytes): - data_io = ReadBuffer(errors) - e = read_errors(data_io) + if meta_ex is None: + return None + if isinstance(meta_ex, bytes): + data_io = ReadBuffer(meta_ex) + me = CacheMetaEx.read(data_io) else: - e = [tuple(err) for err in errors["error_lines"]] + me = CacheMetaEx.deserialize(meta_ex) + if me is None: + return None manager.add_stats(fresh_metas=1) - return m, e + return m, me def validate_meta( @@ -2125,21 +2135,19 @@ def write_cache_meta(meta: CacheMeta, manager: BuildManager, meta_file: str) -> manager.log(f"Error writing cache meta file {meta_file}") -def write_errors_file( - meta_file: str, error_lines: list[ErrorTuple], manager: BuildManager -) -> None: +def write_cache_meta_ex(meta_file: str, meta_ex: CacheMetaEx, manager: BuildManager) -> None: # Write errors cache file - errors_file = get_errors_name(meta_file) + meta_ex_file = get_meta_ex_name(meta_file) metastore = manager.metastore if manager.options.fixed_format_cache: data_io = WriteBuffer() - write_errors(data_io, error_lines) + meta_ex.write(data_io) meta_bytes = data_io.getvalue() else: # Some generic JSON helpers require top-level to be a dict. 
- meta_bytes = json_dumps({"error_lines": error_lines}, manager.options.debug_cache) - if not metastore.write(errors_file, meta_bytes): - manager.log(f"Error writing errors file {errors_file}") + meta_bytes = json_dumps(meta_ex.serialize(), manager.options.debug_cache) + if not metastore.write(meta_ex_file, meta_bytes): + manager.log(f"Error writing meta_ex file {meta_ex_file}") """Dependency manager. @@ -2428,12 +2436,14 @@ def new_state( ignore_all = True meta = None + meta_ex = None interface_hash = b"" meta_source_hash = None if path and source is None and manager.cache_enabled: - meta, error_lines = find_cache_meta(id, path, manager) + meta_pair = find_cache_meta(id, path, manager) # TODO: Get mtime if not cached. - if meta is not None: + if meta_pair is not None: + meta, meta_ex = meta_pair interface_hash = meta.interface_hash meta_source_hash = meta.hash if path and source is None and manager.fscache.isdir(path): @@ -2446,6 +2456,7 @@ def new_state( manager.add_stats(validate_meta_time=time.time() - t0) if meta: + assert meta_ex is not None # Make copies, since we may modify these and want to # compare them to the originals later. dependencies = list(meta.dependencies) @@ -2455,10 +2466,19 @@ def new_state( priorities = {id: pri for id, pri in zip(all_deps, meta.dep_prios)} assert len(all_deps) == len(meta.dep_lines) dep_line_map = {id: line for id, line in zip(all_deps, meta.dep_lines)} + # Merge CacheMetaEx data into CacheMeta data. + for dep in meta_ex.dependencies + meta_ex.suppressed: + priorities[dep] = PRI_INDIRECT + dep_line_map[dep] = 1 + dependencies += meta_ex.dependencies + meta.dependencies += meta_ex.dependencies + suppressed += meta_ex.suppressed + meta.suppressed += meta_ex.suppressed + meta.dep_hashes += meta_ex.dep_hashes assert len(meta.dep_hashes) == len(meta.dependencies) dep_hashes = {k: v for (k, v) in zip(meta.dependencies, meta.dep_hashes)} # Only copy `error_lines` if the module is not silently imported. 
- error_lines = [] if ignore_all else error_lines + error_lines = [] if ignore_all else meta_ex.error_lines imports_ignored = meta.imports_ignored else: dependencies = [] @@ -2693,10 +2713,13 @@ def reload_meta(self) -> None: that may have changed after initial graph loading. Currently, this is only the interface hash. """ + assert self.manager.parallel_worker assert self.path is not None - self.meta, _ = find_cache_meta(self.id, self.path, self.manager, skip_validation=True) - assert self.meta is not None - self.interface_hash = self.meta.interface_hash + new_meta_pair = find_cache_meta(self.id, self.path, self.manager, skip_validation=True) + assert new_meta_pair is not None + new_meta, _ = new_meta_pair + # Copy relevant information from new meta (which may be incomplete). + self.interface_hash = new_meta.interface_hash def add_ancestors(self) -> None: if self.path is not None: @@ -3030,12 +3053,12 @@ def compute_dependencies(self) -> None: self.check_blockers() # Can fail due to bogus relative imports - def type_check_first_pass(self) -> None: + def type_check_first_pass(self, recurse_into_functions: bool = True) -> None: if self.options.semantic_analysis_only: return t0 = time_ref() with self.wrap_context(): - self.type_checker().check_first_pass() + self.type_checker().check_first_pass(recurse_into_functions=recurse_into_functions) self.time_spent_us += time_spent_us(t0) def type_checker(self) -> TypeChecker: @@ -3059,12 +3082,12 @@ def type_map(self) -> dict[Expression, Type]: assert len(self.type_checker()._type_maps) == 1 return self.type_checker()._type_maps[0] - def type_check_second_pass(self) -> bool: + def type_check_second_pass(self, todo: Sequence[DeferredNode] | None = None) -> bool: if self.options.semantic_analysis_only: return False t0 = time_ref() with self.wrap_context(): - result = self.type_checker().check_second_pass() + result = self.type_checker().check_second_pass(todo=todo) self.time_spent_us += time_spent_us(t0) return result @@ 
-3225,8 +3248,9 @@ def write_cache(self) -> tuple[CacheMeta, str] | None: self.id, self.path, self.tree, - list(self.dependencies), - list(self.suppressed), + # Indirect dependencies are stored separately as part of CacheMetaEx. + [dep for dep in self.dependencies if self.priorities.get(dep) != PRI_INDIRECT], + [dep for dep in self.suppressed if self.priorities.get(dep) != PRI_INDIRECT], self.suppressed_deps_opts(), self.imports_ignored, dep_prios, @@ -3293,10 +3317,18 @@ def verify_dependencies(self, suppressed_only: bool = False) -> None: pass def dependency_priorities(self) -> list[int]: - return [self.priorities.get(dep, PRI_HIGH) for dep in self.dependencies + self.suppressed] + return [ + prio + for dep in self.dependencies + self.suppressed + if (prio := self.priorities.get(dep, PRI_HIGH)) != PRI_INDIRECT + ] def dependency_lines(self) -> list[int]: - return [self.dep_line_map.get(dep, 1) for dep in self.dependencies + self.suppressed] + return [ + self.dep_line_map.get(dep, 1) + for dep in self.dependencies + self.suppressed + if self.priorities.get(dep) != PRI_INDIRECT + ] def generate_unused_ignore_notes(self) -> None: if ( @@ -4187,12 +4219,18 @@ def process_graph(graph: Graph, manager: BuildManager) -> None: # type-checking this is already done and results should be empty here. if not manager.workers: assert not results - for id, (interface_hash, errors) in results.items(): - new_hash = bytes.fromhex(interface_hash) - if new_hash != graph[id].interface_hash: - graph[id].mark_interface_stale() - graph[id].interface_hash = new_hash - manager.flush_errors(manager.errors.simplify_path(graph[id].xpath), errors, False) + for id, result in results.items(): + # Interface and implementation results may be mixed in the same batch + # from different workers, process each one accordingly. 
+ if result.interface_hash is not None: + new_hash = bytes.fromhex(result.interface_hash) + if new_hash != graph[id].interface_hash: + graph[id].mark_interface_stale() + graph[id].interface_hash = new_hash + else: + manager.flush_errors( + manager.errors.simplify_path(graph[id].xpath), result.error_lines, False + ) ready = [] for done_scc in done: for dependent in done_scc.direct_dependents: @@ -4266,12 +4304,8 @@ def process_fresh_modules(graph: Graph, modules: list[str], manager: BuildManage manager.add_stats(process_fresh_time=t2 - t0, load_tree_time=t1 - t0) -def process_stale_scc( - graph: Graph, ascc: SCC, manager: BuildManager, from_cache: set[str] | None = None -) -> dict[str, tuple[str, list[str]]]: - """Process the modules in one SCC from source code.""" - # First verify if all transitive dependencies are loaded in the current process. - t0 = time.time() +def maybe_load_deps(graph: Graph, ascc: SCC, manager: BuildManager) -> None: + """Load any missing fresh modules needed to process a stale SCC""" missing_sccs = set() sccs_to_find = ascc.deps.copy() while sccs_to_find: @@ -4326,6 +4360,12 @@ def process_stale_scc( gc.unfreeze() gc.enable() + +def process_stale_scc(graph: Graph, ascc: SCC, manager: BuildManager) -> None: + """Process the modules in one SCC from source code.""" + # First verify if all transitive dependencies are loaded in the current process. + t0 = time.time() + maybe_load_deps(graph, ascc, manager) t1 = time.time() # Process the SCC in stable order. scc = order_ascc_ex(graph, ascc) @@ -4334,9 +4374,7 @@ def process_stale_scc( stale = scc for id in stale: # Re-generate import errors in case this module was loaded from the cache. - # Deserialized states all have meta=None, so the caller should specify - # explicitly which of them are from cache. - if graph[id].meta or from_cache and id in from_cache: + if graph[id].meta: graph[id].verify_dependencies(suppressed_only=True) # We may already have parsed the module, or not. 
# If the former, parse_file() is a no-op. @@ -4376,7 +4414,6 @@ def process_stale_scc( # Flush errors, and write cache in two phases: first data files, then meta files. meta_tuples = {} errors_by_id = {} - formatted_by_id = {} for id in stale: if graph[id].xpath not in manager.errors.ignored_files: errors = manager.errors.file_messages(graph[id].xpath) @@ -4385,16 +4422,30 @@ def process_stale_scc( ) manager.flush_errors(manager.errors.simplify_path(graph[id].xpath), formatted, False) errors_by_id[id] = errors - formatted_by_id[id] = formatted meta_tuples[id] = graph[id].write_cache() for id in stale: meta_tuple = meta_tuples[id] if meta_tuple is None: continue meta, meta_file = meta_tuple - meta.dep_hashes = [graph[dep].interface_hash for dep in graph[id].dependencies] + state = graph[id] + # Indirect dependencies are stored as part of CacheMetaEx below. + meta.dep_hashes = [ + graph[dep].interface_hash + for dep in graph[id].dependencies + if state.priorities.get(dep) != PRI_INDIRECT + ] write_cache_meta(meta, manager, meta_file) - write_errors_file(meta_file, errors_by_id.get(id, []), manager) + indirect = [dep for dep in state.dependencies if state.priorities.get(dep) == PRI_INDIRECT] + meta_ex = CacheMetaEx( + dependencies=indirect, + suppressed=[ + dep for dep in state.suppressed if state.priorities.get(dep) == PRI_INDIRECT + ], + dep_hashes=[graph[dep].interface_hash for dep in indirect], + error_lines=errors_by_id.get(id, []), + ) + write_cache_meta_ex(meta_file, meta_ex, manager) manager.done_sccs.add(ascc.id) manager.add_stats( load_missing_time=t1 - t0, @@ -4403,9 +4454,137 @@ def process_stale_scc( type_check_time=t4 - t3, flush_and_cache_time=time.time() - t4, ) + + +def process_stale_scc_interface( + graph: Graph, ascc: SCC, manager: BuildManager, from_cache: set[str] +) -> list[tuple[str, ModuleResult, str]]: + """Process the modules' interfaces in one SCC from source code.""" + # First verify if all transitive dependencies are loaded in the current 
process. + t0 = time.time() + maybe_load_deps(graph, ascc, manager) + t1 = time.time() + # Process the SCC in stable order. + scc = order_ascc_ex(graph, ascc) + + t2 = time.time() + stale = scc + for id in stale: + # Re-generate import errors in case this module was loaded from the cache. + # Deserialized states all have meta=None, so the caller should specify + # explicitly which of them are from cache. + if id in from_cache: + graph[id].verify_dependencies(suppressed_only=True) + mypy.semanal_main.semantic_analysis_for_scc(graph, scc, manager.errors) + + t3 = time.time() + # Track what modules aren't yet done, so we can finish them as soon + # as possible, saving memory. + unfinished_modules = set(stale) + for id in stale: + graph[id].type_check_first_pass(recurse_into_functions=False) + if not graph[id].type_checker().deferred_nodes: + unfinished_modules.discard(id) + while unfinished_modules: + for id in stale: + if id not in unfinished_modules: + continue + if not graph[id].type_check_second_pass(): + unfinished_modules.discard(id) + + t4 = time.time() + scc_result = [] + meta_tuples = {} + for id in stale: + meta_tuple = graph[id].write_cache() + meta_tuples[id] = meta_tuple + for id in stale: + meta_tuple = meta_tuples[id] + if meta_tuple is None: + continue + meta, meta_file = meta_tuple + state = graph[id] + meta.dep_hashes = [ + graph[dep].interface_hash + for dep in state.dependencies + if state.priorities.get(dep) != PRI_INDIRECT + ] + write_cache_meta(meta, manager, meta_file) + scc_result.append((id, ModuleResult(graph[id].interface_hash.hex(), []), meta_file)) + manager.done_sccs.add(ascc.id) + manager.add_stats( + load_missing_time=t1 - t0, + order_scc_time=t2 - t1, + semanal_time=t3 - t2, + type_check_time_interface=t4 - t3, + flush_and_cache_time=time.time() - t4, + ) + return scc_result + + +def process_stale_scc_implementation( + graph: Graph, stale: list[str], manager: BuildManager, meta_files: list[str] +) -> dict[str, ModuleResult]: + 
"""Process implementations (top-level function/method bodies) in an SCC.""" + t0 = time.time() + unfinished_modules = set(stale) + for id in stale: + checker = graph[id].type_checker() + # We need to reset deferral count after possibly deferring any methods that + # are considered part of the top-level (because they define/infer variables). + checker.pass_num = 0 + checker.deferred_nodes.clear() + tree = graph[id].tree + assert tree is not None + todo = [] + # Passing impl_only will select only "leaf" nodes (not the TypeInfos). + for _, node, info in tree.local_definitions(impl_only=True): + assert isinstance(node.node, (FuncDef, OverloadedFuncDef, Decorator)) + todo.append(DeferredNode(node.node, info)) + graph[id].type_check_second_pass(todo=todo) + if not checker.deferred_nodes: + unfinished_modules.discard(id) + graph[id].detect_possibly_undefined_vars() + graph[id].finish_passes() + while unfinished_modules: + for id in stale: + if id not in unfinished_modules: + continue + if not graph[id].type_check_second_pass(): + unfinished_modules.discard(id) + graph[id].detect_possibly_undefined_vars() + graph[id].finish_passes() + + for id in stale: + graph[id].generate_unused_ignore_notes() + graph[id].generate_ignore_without_code_notes() + scc_result = {} - for id in scc: - scc_result[id] = graph[id].interface_hash.hex(), formatted_by_id.get(id, []) + for id, meta_file in zip(stale, meta_files): + state = graph[id] + indirect = [dep for dep in state.dependencies if state.priorities.get(dep) == PRI_INDIRECT] + meta_ex = CacheMetaEx( + dependencies=indirect, + suppressed=[ + dep for dep in state.suppressed if state.priorities.get(dep) == PRI_INDIRECT + ], + dep_hashes=[graph[dep].interface_hash for dep in indirect], + error_lines=[], + ) + if graph[id].xpath not in manager.errors.ignored_files: + errors = manager.errors.file_messages(graph[id].xpath) + formatted = manager.errors.format_messages( + graph[id].xpath, errors, formatter=manager.error_formatter + ) + 
meta_ex.error_lines = errors + write_cache_meta_ex(meta_file, meta_ex, manager) + scc_result[id] = ModuleResult(None, formatted) + else: + # If there are no errors, only write the cache, don't send anything back + to the caller (as a micro-optimization). + write_cache_meta_ex(meta_file, meta_ex, manager) + + manager.add_stats(type_check_time_implementation=time.time() - t0) return scc_result @@ -4648,19 +4827,42 @@ def write(self, buf: WriteBuffer) -> None: raw_data.write(buf) +class ModuleResult: + """A simple class representing the result of type-checking phase for a single module. + + Non-None interface hash signifies this is a result of checking the interface + of the module, otherwise this is a result of checking the implementation (which + includes errors encountered during both phases). + """ + + def __init__(self, interface_hash: str | None, error_lines: list[str]) -> None: + self.interface_hash = interface_hash + self.error_lines = error_lines + + @classmethod + def read(cls, buf: ReadBuffer) -> ModuleResult: + return ModuleResult(read_str_opt(buf), read_str_list(buf)) + + def write(self, buf: WriteBuffer) -> None: + write_str_opt(buf, self.interface_hash) + write_str_list(buf, self.error_lines) + + class SccResponseMessage(IPCMessage): """ A message representing a result of type checking an SCC. Only one of `result` or `blocker` can be non-None. The latter means there was - a blocking error while type checking the SCC. + a blocking error while type checking the SCC. The `is_interface` flag indicates + whether this is a result for interface or implementation phase of type-checking. 
""" def __init__( self, *, scc_id: int, - result: dict[str, tuple[str, list[str]]] | None = None, + is_interface: bool, + result: dict[str, ModuleResult] | None = None, blocker: CompileError | None = None, ) -> None: if result is not None: @@ -4668,6 +4870,7 @@ def __init__( if blocker is not None: assert result is None self.scc_id = scc_id + self.is_interface = is_interface self.result = result self.blocker = blocker @@ -4675,25 +4878,28 @@ def __init__( def read(cls, buf: ReadBuffer) -> SccResponseMessage: assert read_tag(buf) == SCC_RESPONSE_MESSAGE scc_id = read_int(buf) + is_interface = read_bool(buf) tag = read_tag(buf) if tag == LITERAL_NONE: return SccResponseMessage( scc_id=scc_id, + is_interface=is_interface, blocker=CompileError(read_str_list(buf), read_bool(buf), read_str_opt(buf)), ) else: assert tag == DICT_STR_GEN return SccResponseMessage( scc_id=scc_id, + is_interface=is_interface, result={ - read_str_bare(buf): (read_str(buf), read_str_list(buf)) - for _ in range(read_int_bare(buf)) + read_str_bare(buf): ModuleResult.read(buf) for _ in range(read_int_bare(buf)) }, ) def write(self, buf: WriteBuffer) -> None: write_tag(buf, SCC_RESPONSE_MESSAGE) write_int(buf, self.scc_id) + write_bool(buf, self.is_interface) if self.result is None: assert self.blocker is not None write_tag(buf, LITERAL_NONE) @@ -4705,9 +4911,7 @@ def write(self, buf: WriteBuffer) -> None: write_int_bare(buf, len(self.result)) for mod_id in sorted(self.result): write_str_bare(buf, mod_id) - hex_hash, errs = self.result[mod_id] - write_str(buf, hex_hash) - write_str_list(buf, errs) + self.result[mod_id].write(buf) class SourcesDataMessage(IPCMessage): diff --git a/mypy/build_worker/worker.py b/mypy/build_worker/worker.py index b35da8c412c73..3b5cecb4dda37 100644 --- a/mypy/build_worker/worker.py +++ b/mypy/build_worker/worker.py @@ -8,6 +8,8 @@ * Load graph using the sources, and send ack to coordinator. * Receive SCC structure from coordinator, and ack it. 
* Receive an SCC id from coordinator, process it, and send back the results. +* Each SCC is processed in two phases: interface then implementation, with an ack in + between. (It is not 100% clear why the ack is needed, but deadlocks happen without it) * When prompted by coordinator (with a scc_id=None message), cleanup and shutdown. """ @@ -37,7 +39,8 @@ SccsDataMessage, SourcesDataMessage, load_plugins, - process_stale_scc, + process_stale_scc_implementation, + process_stale_scc_interface, ) from mypy.defaults import RECURSION_LIMIT, WORKER_CONNECTION_TIMEOUT from mypy.errors import CompileError, ErrorInfo, Errors, report_internal_error @@ -153,24 +156,35 @@ def serve(server: IPCServer, ctx: ServerContext) -> None: scc = manager.scc_by_id[scc_id] t0 = time.time() try: - if platform.python_implementation() == "CPython": - # Since we are splitting the GC freeze hack into multiple smaller freezes, - # we should collect young generations to not accumulate accidental garbage. - gc.collect(generation=1) - gc.collect(generation=0) - gc.disable() load_states(scc, graph, manager, scc_message.import_errors, scc_message.mod_data) - if platform.python_implementation() == "CPython": - gc.freeze() - gc.unfreeze() - gc.enable() - result = process_stale_scc(graph, scc, manager, from_cache=graph_data.from_cache) + result = process_stale_scc_interface( + graph, scc, manager, from_cache=graph_data.from_cache + ) # We must commit after each SCC, otherwise we break --sqlite-cache. 
manager.metastore.commit() except CompileError as blocker: - send(server, SccResponseMessage(scc_id=scc_id, blocker=blocker)) + send(server, SccResponseMessage(scc_id=scc_id, is_interface=True, blocker=blocker)) else: - send(server, SccResponseMessage(scc_id=scc_id, result=result)) + mod_results = {} + stale = [] + meta_files = [] + for id, mod_result, meta_file in result: + stale.append(id) + mod_results[id] = mod_result + meta_files.append(meta_file) + send(server, SccResponseMessage(scc_id=scc_id, is_interface=True, result=mod_results)) + # Only proceed with the implementations if there are no blockers so far. + AckMessage.read(receive(server)) + try: + result = process_stale_scc_implementation(graph, stale, manager, meta_files) + # Both phases write cache, so we should commit here as well. + manager.metastore.commit() + except CompileError as blocker: + send( + server, SccResponseMessage(scc_id=scc_id, is_interface=False, blocker=blocker) + ) + else: + send(server, SccResponseMessage(scc_id=scc_id, is_interface=False, result=result)) manager.add_stats(total_process_stale_time=time.time() - t0, stale_sccs_processed=1) @@ -182,6 +196,12 @@ def load_states( mod_data: dict[str, tuple[bytes, FileRawData | None]], ) -> None: """Re-create full state of an SCC as it would have been in coordinator.""" + if platform.python_implementation() == "CPython": + # Since we are splitting the GC freeze hack into multiple smaller freezes, + # we should collect young generations to not accumulate accidental garbage. + gc.collect(generation=1) + gc.collect(generation=0) + gc.disable() for id in scc.mod_ids: state = graph[id] # Re-clone options since we don't send them, it is usually faster than deserializing. 
@@ -200,6 +220,10 @@ def load_states( manager.errors.set_file(state.xpath, id, state.options) for err_info in import_errors[id]: manager.errors.add_error_info(err_info) + if platform.python_implementation() == "CPython": + gc.freeze() + gc.unfreeze() + gc.enable() def setup_worker_manager(sources: list[BuildSource], ctx: ServerContext) -> BuildManager | None: diff --git a/mypy/cache.py b/mypy/cache.py index 0adc7affb8cb4..096ebedfbf7c1 100644 --- a/mypy/cache.py +++ b/mypy/cache.py @@ -69,7 +69,7 @@ from mypy_extensions import u8 # High-level cache layout format -CACHE_VERSION: Final = 7 +CACHE_VERSION: Final = 8 # Type used internally to represent errors: # (path, line, column, end_line, end_column, severity, message, code) @@ -77,7 +77,12 @@ class CacheMeta: - """Class representing cache metadata for a module.""" + """Class representing cache metadata for a module. + + This class represents the data known after checking module interface only, i.e. + this doesn't have: error messages and indirect dependencies, these are stored + in CacheMetaEx. 
+ """ def __init__( self, @@ -236,6 +241,60 @@ def read(cls, data: ReadBuffer, data_file: str) -> CacheMeta | None: return None +class CacheMetaEx: + """Class representing "implementation-specific" part of cache metadata for a module.""" + + def __init__( + self, + dependencies: list[str], + suppressed: list[str], + dep_hashes: list[bytes], + error_lines: list[ErrorTuple], + ) -> None: + self.dependencies = dependencies + self.suppressed = suppressed + self.dep_hashes = dep_hashes + self.error_lines = error_lines + + def serialize(self) -> dict[str, Any]: + return { + "dependencies": self.dependencies, + "suppressed": self.suppressed, + "dep_hashes": [dep.hex() for dep in self.dep_hashes], + "error_lines": self.error_lines, + } + + @classmethod + def deserialize(cls, meta: dict[str, Any]) -> CacheMetaEx | None: + try: + return CacheMetaEx( + dependencies=meta["dependencies"], + suppressed=meta["suppressed"], + dep_hashes=[bytes.fromhex(dep) for dep in meta["dep_hashes"]], + error_lines=[tuple(err) for err in meta["error_lines"]], + ) + except (KeyError, ValueError): + return None + + def write(self, data: WriteBuffer) -> None: + write_str_list(data, self.dependencies) + write_str_list(data, self.suppressed) + write_bytes_list(data, self.dep_hashes) + write_errors(data, self.error_lines) + + @classmethod + def read(cls, data: ReadBuffer) -> CacheMetaEx | None: + try: + return CacheMetaEx( + dependencies=read_str_list(data), + suppressed=read_str_list(data), + dep_hashes=read_bytes_list(data), + error_lines=read_errors(data), + ) + except (ValueError, AssertionError): + return None + + # Always use this type alias to refer to type tags. 
Tag = u8 diff --git a/mypy/checker.py b/mypy/checker.py index 7d0b5dbde09d8..f33f6f9a5c370 100644 --- a/mypy/checker.py +++ b/mypy/checker.py @@ -353,8 +353,6 @@ class TypeChecker(NodeVisitor[None], TypeCheckerSharedApi, SplittingVisitor): tscope: Scope scope: CheckerScope - # Innermost enclosing type - type: TypeInfo | None # Stack of function return types return_types: list[Type] # Flags; true for dynamically typed functions @@ -432,7 +430,6 @@ def __init__( self.scope = CheckerScope(tree) self.binder = ConditionalTypeBinder(options) self.globals = tree.names - self.type = None self.return_types = [] self.dynamic_funcs = [] self.partial_types = [] @@ -512,7 +509,7 @@ def reset(self) -> None: self.inferred_attribute_types = None self.scope = CheckerScope(self.tree) - def check_first_pass(self) -> None: + def check_first_pass(self, recurse_into_functions: bool = True) -> None: """Type check the entire file, but defer functions with unresolved references. Unresolved references are forward references to variables @@ -522,7 +519,7 @@ def check_first_pass(self) -> None: Deferred functions will be processed by check_second_pass(). """ - self.recurse_into_functions = True + self.recurse_into_functions = recurse_into_functions with state.strict_optional_set(self.options.strict_optional), checker_state.set(self): self.errors.set_file( self.path, self.tree.fullname, scope=self.tscope, options=self.options @@ -718,11 +715,11 @@ def accept_loop( # def visit_overloaded_func_def(self, defn: OverloadedFuncDef) -> None: - # If a function/method can infer variable types, it should be processed as part - # of the module top level (i.e. module interface). - if not self.recurse_into_functions and not defn.def_or_infer_vars: - return - with self.tscope.function_scope(defn), self.set_recurse_into_functions(): + # We always process overload as part of the top-level to infer various + # externally visible properties like its type, similar to visit_decorator(). 
+ # Only the body of the implementation is checked as a function-level target. + # TODO: clean-up deferral logic and the daemon to avoid unnecessary work. + with self.tscope.function_scope(defn): self._visit_overloaded_func_def(defn) def _visit_overloaded_func_def(self, defn: OverloadedFuncDef) -> None: @@ -771,6 +768,11 @@ def _visit_overloaded_func_def(self, defn: OverloadedFuncDef) -> None: settable = defn.items[0].var.is_settable_property # Do not visit the second time the items we checked above. if (settable and i > 1) or (not settable and i > 0): + # Type check initialization expressions. + # TODO: initializers can infer types if they contain a walrus, + # it may be not safe to optimize them away completely. + if not self.can_skip_diagnostics: + self.check_default_params(fdef.func) self.check_func_item(fdef.func, name=fdef.func.name, allow_empty=True) else: # Perform full check for real overloads to infer type of all decorated @@ -1196,6 +1198,13 @@ def get_generator_return_type(self, return_type: Type, is_coroutine: bool) -> Ty return NoneType() def visit_func_def(self, defn: FuncDef) -> None: + # Type check initialization expressions as part of top-level. + if not self.can_skip_diagnostics: + self.check_default_params(defn) + if defn.original_def: + # Override previous definition (may affect externally visible types). + new_type = self.function_type(defn) + self.check_func_def_override(defn, new_type) if not self.recurse_into_functions and not defn.def_or_infer_vars: return with self.tscope.function_scope(defn), self.set_recurse_into_functions(): @@ -1210,10 +1219,6 @@ def visit_func_def(self, defn: FuncDef) -> None: found_method_base_classes = self.check_method_override(defn) self.check_explicit_override_decorator(defn, found_method_base_classes) self.check_inplace_operator_method(defn) - if defn.original_def: - # Override previous definition. 
- new_type = self.function_type(defn) - self.check_func_def_override(defn, new_type) def check_func_item( self, @@ -1262,12 +1267,12 @@ def check_func_def_override(self, defn: FuncDef, new_type: FunctionLike) -> None # decorated function. orig_type = defn.original_def.type if orig_type is None: - # If other branch is unreachable, we don't type check it and so we might + # If other branch is unreachable, we don't type check it, and so we might # not have a type for the original definition return if isinstance(orig_type, PartialType): if orig_type.type is None: - # Ah this is a partial type. Give it the type of the function. + # Ah, this is a partial type. Give it the type of the function. orig_def = defn.original_def if isinstance(orig_def, Decorator): var = orig_def.var @@ -1279,8 +1284,9 @@ def check_func_def_override(self, defn: FuncDef, new_type: FunctionLike) -> None del partial_types[var] else: # Trying to redefine something like partial empty list as function. + defn.is_invalid_redefinition = True self.fail(message_registry.INCOMPATIBLE_REDEFINITION, defn) - else: + elif not defn.is_invalid_redefinition: name_expr = NameExpr(defn.name) name_expr.node = defn.original_def self.binder.assign_type(name_expr, new_type, orig_type) @@ -1320,29 +1326,11 @@ def check_func_def( if not self.can_skip_diagnostics: self.check_funcdef_item(item, typ, name, defn=defn, original_typ=original_typ) - # Fix the type if decorated with `@types.coroutine` or `@asyncio.coroutine`. - if defn.is_awaitable_coroutine: - # Update the return type to AwaitableGenerator. - # (This doesn't exist in typing.py, only in typing.pyi.) 
- t = typ.ret_type - c = defn.is_coroutine - ty = self.get_generator_yield_type(t, c) - tc = self.get_generator_receive_type(t, c) - if c: - tr = self.get_coroutine_return_type(t) - else: - tr = self.get_generator_return_type(t, c) - ret_type = self.named_generic_type( - "typing.AwaitableGenerator", [ty, tc, tr, t] - ) - typ = typ.copy_modified(ret_type=ret_type) - defn.type = typ - # Push return type. self.return_types.append(typ.ret_type) with self.scope.push_function(defn): - # We temporary push the definition to get the self type as + # We temporarily push the definition to get the self type as # visible from *inside* of this function/method. ref_type: Type | None = self.scope.active_self_type() @@ -1384,11 +1372,6 @@ def check_func_def( # Need to store arguments again for the expanded item. store_argument_type(item, i, typ, self.named_generic_type) - # Type check initialization expressions. - body_is_trivial = is_trivial_body(defn.body) - if not self.can_skip_diagnostics: - self.check_default_params(item, body_is_trivial) - # Type check body in a new scope. with self.binder.top_frame_context(): # Copy some type narrowings from an outer function when it seems safe enough @@ -1445,6 +1428,7 @@ def check_func_def( self.binder.pop_frame(True, 0) if not unreachable: + body_is_trivial = is_trivial_body(defn.body) if defn.is_generator or is_named_instance( self.return_types[-1], "typing.AwaitableGenerator" ): @@ -1618,7 +1602,7 @@ def require_correct_self_argument(self, func: Type, defn: FuncDef) -> bool: return bool(func.arg_types) with self.scope.push_function(defn): - # We temporary push the definition to get the self type as + # We temporarily push the definition to get the self type as # visible from *inside* of this function/method. 
ref_type: Type | None = self.scope.active_self_type() if ref_type is None: @@ -1710,7 +1694,12 @@ def check_unbound_return_typevar(self, typ: CallableType) -> None: context=typ.ret_type, ) - def check_default_params(self, item: FuncItem, body_is_trivial: bool) -> None: + def check_default_params(self, item: FuncItem, body_is_trivial: bool | None = None) -> None: + if body_is_trivial is None: + body_is_trivial = is_trivial_body(item.body) + # Although initializers are checked as part of the top-level, we do not + # show errors in them if they appear in an unannotated function. + self.dynamic_funcs.append(item.is_dynamic()) for param in item.arguments: if param.initializer is None: continue @@ -1744,6 +1733,7 @@ def check_default_params(self, item: FuncItem, body_is_trivial: bool) -> None: rvalue_name="default", notes=notes, ) + self.dynamic_funcs.pop() def is_forward_op_method(self, method_name: str) -> bool: return method_name in operators.reverse_op_methods @@ -2680,11 +2670,7 @@ def visit_class_def(self, defn: ClassDef) -> None: self.fail(message_registry.CANNOT_INHERIT_FROM_FINAL.format(base.name), defn) if not can_have_shared_disjoint_base(typ.bases): self.fail(message_registry.INCOMPATIBLE_DISJOINT_BASES.format(typ.name), defn) - with ( - self.tscope.class_scope(defn.info), - self.enter_partial_types(is_class=True), - self.enter_class(defn.info), - ): + with self.tscope.class_scope(defn.info), self.enter_partial_types(is_class=True): old_binder = self.binder self.binder = ConditionalTypeBinder(self.options) with self.binder.top_frame_context(): @@ -2753,15 +2739,6 @@ def visit_class_def(self, defn: ClassDef) -> None: self.check_enum(defn) infer_class_variances(defn.info) - @contextmanager - def enter_class(self, type: TypeInfo) -> Iterator[None]: - original_type = self.type - self.type = type - try: - yield - finally: - self.type = original_type - def check_final_deletable(self, typ: TypeInfo) -> None: # These checks are only for mypyc. 
Only perform some checks that are easier # to implement here than in mypyc. @@ -5604,6 +5581,29 @@ def visit_decorator(self, e: Decorator) -> None: def visit_decorator_inner( self, e: Decorator, allow_empty: bool = False, skip_first_item: bool = False ) -> None: + # Fix the type if decorated with `@types.coroutine` or `@asyncio.coroutine`. + defn = e.func + if defn.is_awaitable_coroutine: + assert isinstance(defn.type, CallableType) + # Update the return type to AwaitableGenerator (unless we already did). + # Note, this doesn't exist in typing.py, only in typing.pyi. + if not is_named_instance(defn.type.ret_type, "typing.AwaitableGenerator"): + t = defn.type.ret_type + c = defn.is_coroutine + ty = self.get_generator_yield_type(t, c) + tc = self.get_generator_receive_type(t, c) + if c: + tr = self.get_coroutine_return_type(t) + else: + tr = self.get_generator_return_type(t, c) + ret_type = self.named_generic_type("typing.AwaitableGenerator", [ty, tc, tr, t]) + typ = defn.type.copy_modified(ret_type=ret_type) + defn.type = typ + + # Type check initialization expressions as part of top-level. + if not self.can_skip_diagnostics: + self.check_default_params(defn) + if self.recurse_into_functions or e.func.def_or_infer_vars: with self.tscope.function_scope(e.func), self.set_recurse_into_functions(): self.check_func_item(e.func, name=e.func.name, allow_empty=allow_empty) @@ -6108,11 +6108,13 @@ def intersect_instance_callable(self, typ: Instance, callable_type: CallableType short_name = format_type_bare(typ, self.options) cdef, info = self.make_fake_typeinfo(cur_module.fullname, gen_name, short_name, [typ]) - # Build up a fake FuncDef so we can populate the symbol table. + # Build up a fake FuncDef, so we can populate the symbol table. 
func_def = FuncDef("__call__", [], Block([]), callable_type) func_def._fullname = cdef.fullname + ".__call__" func_def.info = info - info.names["__call__"] = SymbolTableNode(MDEF, func_def) + sym = SymbolTableNode(MDEF, func_def) + sym.plugin_generated = True + info.names["__call__"] = sym cur_module.names[gen_name] = SymbolTableNode(GDEF, info) @@ -8338,7 +8340,7 @@ def is_func_scope(self) -> bool: @property def type(self) -> TypeInfo | None: - return self._chk.type + return self._chk.scope.current_class() class CollectArgTypeVarTypes(TypeTraverserVisitor): diff --git a/mypy/checker_shared.py b/mypy/checker_shared.py index 55d9e15764178..fef4c48197efa 100644 --- a/mypy/checker_shared.py +++ b/mypy/checker_shared.py @@ -343,9 +343,14 @@ def active_self_type(self) -> Instance | TupleType | None: def current_self_type(self) -> Instance | TupleType | None: """Same as active_self_type() but handle functions nested in methods.""" + if (item := self.current_class()) is not None: + return fill_typevars(item) + return None + + def current_class(self) -> TypeInfo | None: for item in reversed(self.stack): if isinstance(item, TypeInfo): - return fill_typevars(item) + return item return None def is_top_level(self) -> bool: diff --git a/mypy/checkexpr.py b/mypy/checkexpr.py index 49fc1159856f7..6000a1bd649c1 100644 --- a/mypy/checkexpr.py +++ b/mypy/checkexpr.py @@ -6431,7 +6431,7 @@ def try_parse_as_type_expression(self, maybe_type_expr: Expression) -> Type | No if not isinstance(maybe_type_expr, MaybeTypeExpression): return None - # Check whether has already been parsed as a type expression + # Check whether it has already been parsed as a type expression # by SemanticAnalyzer.try_parse_as_type_expression(), # perhaps containing a string annotation if ( diff --git a/mypy/nodes.py b/mypy/nodes.py index 37ea4d3b0d561..cc05bed6b8835 100644 --- a/mypy/nodes.py +++ b/mypy/nodes.py @@ -472,12 +472,12 @@ def __init__( self._is_typeshed_file = None self.raw_data = None - def 
local_definitions(self) -> Iterator[Definition]: + def local_definitions(self, *, impl_only: bool = False) -> Iterator[Definition]: """Return all definitions within the module (including nested). This doesn't include imported definitions. """ - return local_definitions(self.names, self.fullname) + return local_definitions(self.names, self.fullname, impl_only=impl_only) @property def name(self) -> str: @@ -1036,6 +1036,7 @@ class FuncDef(FuncItem, SymbolNode, Statement): "original_def", "is_trivial_body", "is_trivial_self", + "is_invalid_redefinition", "is_mypy_only", # Present only when a function is decorated with @typing.dataclass_transform or similar "dataclass_transform_spec", @@ -1080,6 +1081,10 @@ def __init__( self.original_first_arg: str | None = arguments[0].variable.name else: self.original_first_arg = None + # Whether this function is an invalid redefinition of variable with the same name? + # We record this status to avoid multiple (similar but different) errors in case + # of partial types etc. + self.is_invalid_redefinition = False @property def name(self) -> str: @@ -5227,11 +5232,12 @@ def get_func_def(typ: mypy.types.CallableType) -> SymbolNode | None: def local_definitions( - names: SymbolTable, name_prefix: str, info: TypeInfo | None = None + names: SymbolTable, name_prefix: str, info: TypeInfo | None = None, impl_only: bool = False ) -> Iterator[Definition]: """Iterate over local definitions (not imported) in a symbol table. - Recursively iterate over class members and nested classes. + Recursively iterate over class members and nested classes. If impl_only is True, do + not yield the classes themselves, only methods. """ # TODO: What should the name be? Or maybe remove it? for name, symnode in names.items(): @@ -5242,9 +5248,21 @@ def local_definitions( fullname = name_prefix + "." 
+ shortname node = symnode.node if node and node.fullname == fullname: - yield fullname, symnode, info + yield_node = True + if impl_only: + if not isinstance(node, (FuncDef, OverloadedFuncDef, Decorator)): + yield_node = False + else: + impl = node.func if isinstance(node, Decorator) else node + # We never type-check generated methods. The generated classes however + # need to be visited, so we don't skip them below. + yield_node = not impl.def_or_infer_vars and not symnode.plugin_generated + if isinstance(node, (FuncDef, OverloadedFuncDef, Decorator)) and "@" in fullname: + yield_node = False + if yield_node: + yield fullname, symnode, info if isinstance(node, TypeInfo): - yield from local_definitions(node.names, fullname, node) + yield from local_definitions(node.names, fullname, node, impl_only) # See docstring for mypy/cache.py for reserved tag ranges. diff --git a/mypy/semanal_infer.py b/mypy/semanal_infer.py index 89a073cdad473..abe9bf18c45e9 100644 --- a/mypy/semanal_infer.py +++ b/mypy/semanal_infer.py @@ -3,7 +3,7 @@ from __future__ import annotations from mypy.nodes import ARG_POS, CallExpr, Decorator, Expression, FuncDef, RefExpr, Var -from mypy.semanal_shared import SemanticAnalyzerInterface +from mypy.semanal_shared import SemanticAnalyzerInterface, set_callable_name from mypy.typeops import function_type from mypy.types import ( AnyType, @@ -23,11 +23,12 @@ def infer_decorator_signature_if_simple( """Try to infer the type of the decorated function. This lets us resolve additional references to decorated functions - during type checking. Otherwise the type might not be available + during type checking. Otherwise, the type might not be available when we need it, since module top levels can't be deferred. This basically uses a simple special-purpose type inference - engine just for decorators. + engine just for decorators. Logic here should be kept in sync with + visit_decorator() in mypy/checker.py. 
""" if dec.var.is_property: # Decorators are expected to have a callable type (it's a little odd). @@ -58,7 +59,8 @@ def infer_decorator_signature_if_simple( if decorator_preserves_type: # No non-identity decorators left. We can trivially infer the type # of the function here. - dec.var.type = function_type(dec.func, analyzer.named_type("builtins.function")) + sig = function_type(dec.func, analyzer.named_type("builtins.function")) + dec.var.type = set_callable_name(sig, dec.func) if dec.decorators: return_type = calculate_return_type(dec.decorators[0]) if return_type and isinstance(return_type, AnyType): @@ -72,6 +74,8 @@ def infer_decorator_signature_if_simple( orig_sig = function_type(dec.func, analyzer.named_type("builtins.function")) sig.name = orig_sig.items[0].name dec.var.type = sig + if isinstance(sig, CallableType): + sig.definition = dec def is_identity_signature(sig: Type) -> bool: diff --git a/mypy/semanal_newtype.py b/mypy/semanal_newtype.py index 0c717b5d9a0e7..e1d62c4410c9f 100644 --- a/mypy/semanal_newtype.py +++ b/mypy/semanal_newtype.py @@ -258,7 +258,9 @@ def build_newtype_typeinfo( previous_sym = info.names["__init__"].node assert isinstance(previous_sym, FuncDef) updated = old_type != previous_sym.arguments[1].variable.type - info.names["__init__"] = SymbolTableNode(MDEF, init_func) + sym = SymbolTableNode(MDEF, init_func) + sym.plugin_generated = True + info.names["__init__"] = sym if has_placeholder(old_type): self.api.process_placeholder(None, "NewType base", info, force_progress=updated) diff --git a/mypy/test/test_diff_cache.py b/mypy/test/test_diff_cache.py index d35cd99f342d7..86e801b61f984 100644 --- a/mypy/test/test_diff_cache.py +++ b/mypy/test/test_diff_cache.py @@ -126,7 +126,7 @@ def test_diff_cache_produces_valid_json(self) -> None: c_keys = {k for k in keys if "/c." in k or k.startswith("c.")} a_keys = {k for k in keys if "/a." 
in k or k.startswith("a.")}

        assert len(a_keys) == 0, f"Unexpected a.* entries in diff: {a_keys}"
-        assert len(b_keys) == 2, f"Expected 2 b.* entries in diff, got: {b_keys}"
+        assert len(b_keys) == 3, f"Expected 3 b.* entries in diff, got: {b_keys}"
         assert len(c_keys) == 4, f"Expected 3 c.* entries in diff, got: {c_keys}"

         # The new access to a.x in b.py should create a fine-grained
diff --git a/mypy/test/testcheck.py b/mypy/test/testcheck.py
index 96d6f536b961a..69c8319ad413d 100644
--- a/mypy/test/testcheck.py
+++ b/mypy/test/testcheck.py
@@ -147,6 +147,9 @@ def run_case_once(
             # Note: do not use this unless really needed!
             if testcase.name.endswith("_no_parallel"):
                 raise pytest.skip("Test not supported in parallel mode yet")
+        else:
+            if testcase.name.endswith("_parallel_only"):
+                raise pytest.skip("Test is only for parallel mode")

         if options.native_parser and testcase.name.endswith("_no_native_parse"):
             raise pytest.skip("Test not supported by native parser yet")
@@ -293,8 +296,10 @@ def find_missing_cache_files(
         ignore_errors = True
     missing = {}
     for id, path in modules.items():
-        meta, _ = build.find_cache_meta(id, path, manager)
-        if not build.validate_meta(meta, id, path, ignore_errors, manager):
+        meta_pair = build.find_cache_meta(id, path, manager)
+        if meta_pair is None:
+            missing[id] = path
+        elif not build.validate_meta(meta_pair[0], id, path, ignore_errors, manager):
             missing[id] = path
     return set(missing.values())
diff --git a/test-data/unit/check-final.test b/test-data/unit/check-final.test
index 5d2a5cf252895..10943515688e7 100644
--- a/test-data/unit/check-final.test
+++ b/test-data/unit/check-final.test
@@ -144,7 +144,7 @@ reveal_type(C().f)  # N: Revealed type is "Overload(def (x: builtins.int) -> bui
 [case testFinalDefiningMethOverloadedStubs]
 from mod import C
-reveal_type(C().f)
+reveal_type(C().f)  # N: Revealed type is "Overload(def (x: builtins.int) -> builtins.int, def (x: builtins.str) -> builtins.str)"
 [file mod.pyi]
 from typing import final, 
overload @@ -157,12 +157,9 @@ class C: @overload def bad(self, x: int) -> int: ... - @final # Error! + @final # E: In a stub file @final must be applied only to the first overload @overload def bad(self, x: str) -> str: ... -[out] -tmp/mod.pyi:12: error: In a stub file @final must be applied only to the first overload -main:3: note: Revealed type is "Overload(def (x: builtins.int) -> builtins.int, def (x: builtins.str) -> builtins.str)" [case testFinalDefiningProperty] from typing import final @@ -1251,7 +1248,9 @@ def check_final_init() -> None: new_instance.__init__() [builtins fixtures/tuple.pyi] -[case testNarrowingOfFinalPersistsInFunctions] +-- This is tricky and expensive to support in parallel mode. +-- We may want to stop supporting this niche use case +[case testNarrowingOfFinalPersistsInFunctions_no_parallel] from typing import Final, Union def _init() -> Union[int, None]: diff --git a/test-data/unit/check-flags.test b/test-data/unit/check-flags.test index 1e6105cbc49c0..7443a63cfd51e 100644 --- a/test-data/unit/check-flags.test +++ b/test-data/unit/check-flags.test @@ -1604,7 +1604,8 @@ class Annotated: [builtins fixtures/plugin_attrs.pyi] -[case testDisallowIncompleteDefsAttrsPartialAnnotations] +-- TODO: this was implemented using a hack that does not work well in parallel mode +[case testDisallowIncompleteDefsAttrsPartialAnnotations_no_parallel] # flags: --disallow-incomplete-defs import attrs diff --git a/test-data/unit/check-functions.test b/test-data/unit/check-functions.test index bff8c03a8fc53..7f0ba55733f7f 100644 --- a/test-data/unit/check-functions.test +++ b/test-data/unit/check-functions.test @@ -1122,7 +1122,7 @@ class A: [case testForwardReferenceToDynamicallyTypedStaticMethod] def f(self) -> None: A.x(1).y - A.x() # E: Missing positional argument "x" in call to "x" + A.x() # E: Missing positional argument "x" in call to "x" of "A" class A: @staticmethod @@ -1144,7 +1144,7 @@ class A: [case 
testForwardReferenceToDynamicallyTypedClassMethod] def f(self) -> None: A.x(1).y - A.x() # E: Missing positional argument "a" in call to "x" + A.x() # E: Missing positional argument "a" in call to "x" of "A" class A: @classmethod diff --git a/test-data/unit/check-incremental.test b/test-data/unit/check-incremental.test index 5b256b1731e01..5a2933c68b88e 100644 --- a/test-data/unit/check-incremental.test +++ b/test-data/unit/check-incremental.test @@ -1163,7 +1163,8 @@ class A: [out1] main:2: error: "A" has no attribute "bar" -[case testIncrementalChangedError] +-- Order of files unstable in parallel mode +[case testIncrementalChangedError_no_parallel] import m [file m.py] import n @@ -1325,7 +1326,8 @@ tmp/main.py:4: note: Revealed type is "builtins.str" [out2] tmp/main.py:4: note: Revealed type is "Any" -[case testIncrementalFixedBugCausesPropagation] +-- Order of files unstable in parallel mode +[case testIncrementalFixedBugCausesPropagation_no_parallel] import mod1 [file mod1.py] @@ -1365,7 +1367,8 @@ tmp/mod1.py:3: note: Revealed type is "builtins.int" [out2] tmp/mod1.py:3: note: Revealed type is "builtins.int" -[case testIncrementalIncidentalChangeWithBugCausesPropagation] +-- Order of files unstable in parallel mode +[case testIncrementalIncidentalChangeWithBugCausesPropagation_no_parallel] import mod1 [file mod1.py] @@ -1400,12 +1403,12 @@ class C: [out1] tmp/mod3.py:6: error: Incompatible types in assignment (expression has type "str", variable has type "int") tmp/mod1.py:3: note: Revealed type is "builtins.int" - [out2] tmp/mod3.py:6: error: Incompatible types in assignment (expression has type "str", variable has type "int") tmp/mod1.py:3: note: Revealed type is "builtins.str" -[case testIncrementalIncidentalChangeWithBugFixCausesPropagation] +-- Order of files unstable in parallel mode +[case testIncrementalIncidentalChangeWithBugFixCausesPropagation_no_parallel] import mod1 [file mod1.py] @@ -1973,7 +1976,8 @@ main:2: error: Name "nonexisting" is not 
defined [out2] main:2: error: Name "nonexisting" is not defined -[case testIncrementalInnerClassAttrInMethodReveal] +-- Order of files unstable in parallel mode +[case testIncrementalInnerClassAttrInMethodReveal_no_parallel] import crash reveal_type(crash.C().a) reveal_type(crash.D().a) @@ -4085,7 +4089,8 @@ main:2: error: Argument 2 to "B" has incompatible type "str"; expected "int" [out2] [rechecked b] -[case testIncrementalDataclassesThreeFiles] +-- Order of files unstable in parallel mode +[case testIncrementalDataclassesThreeFiles_no_parallel] from c import C C('foo', 5, True) @@ -5407,7 +5412,8 @@ tmp/c.py:2: note: Revealed type is "b." [out2] tmp/c.py:2: note: Revealed type is "b." -[case testIsInstanceAdHocIntersectionIncrementalIntersectionToUnreachable] +-- Order of files unstable in parallel mode +[case testIsInstanceAdHocIntersectionIncrementalIntersectionToUnreachable_no_parallel] import c [file a.py] class A: @@ -5441,7 +5447,8 @@ tmp/c.py:2: note: Revealed type is "a." tmp/b.py:2: error: Cannot determine type of "y" tmp/c.py:2: note: Revealed type is "Any" -[case testIsInstanceAdHocIntersectionIncrementalUnreachaableToIntersection] +-- Order of files unstable in parallel mode +[case testIsInstanceAdHocIntersectionIncrementalUnreachaableToIntersection_no_parallel] import c [file a.py] class A: diff --git a/test-data/unit/check-inference.test b/test-data/unit/check-inference.test index 3926627df5ad0..f2d3f8212df89 100644 --- a/test-data/unit/check-inference.test +++ b/test-data/unit/check-inference.test @@ -1727,14 +1727,16 @@ b = {} # E: Need type annotation for "b" (hint: "b: dict[, ] = ...") b[{}] = 1 [builtins fixtures/dict.pyi] -[case testInferDictInitializedToEmptyAndUpdatedFromMethod] +-- Parallel mode does not support --no-local-partial-types +[case testInferDictInitializedToEmptyAndUpdatedFromMethod_no_parallel] # flags: --no-local-partial-types map = {} def add() -> None: map[1] = 2 [builtins fixtures/dict.pyi] -[case 
testInferDictInitializedToEmptyAndUpdatedFromMethodUnannotated] +-- Parallel mode does not support --no-local-partial-types +[case testInferDictInitializedToEmptyAndUpdatedFromMethodUnannotated_no_parallel] # flags: --no-local-partial-types map = {} def add(): @@ -2412,15 +2414,12 @@ class R: [case testMultipassAndMultipleFiles] import m def f() -> None: - x() + x() # E: "int" not callable x = 0 [file m.py] def g() -> None: - y() + y() # E: "int" not callable y = 0 -[out] -tmp/m.py:2: error: "int" not callable -main:3: error: "int" not callable [case testForwardReferenceToDecoratedClassMethod] from typing import TypeVar, Callable @@ -2783,7 +2782,8 @@ x = '' # E: Incompatible types in assignment (expression has type "str", variab def g() -> None: reveal_type(x) # N: Revealed type is "builtins.int | None" -[case testLocalPartialTypesWithGlobalInitializedToNone4] +-- TODO: combine 4 tests below back into 2 when possible. +[case testLocalPartialTypesWithGlobalInitializedToNone4_no_parallel] # flags: --local-partial-types --no-strict-optional a = None @@ -2795,7 +2795,7 @@ a = '' reveal_type(a) # N: Revealed type is "builtins.str" [builtins fixtures/list.pyi] -[case testLocalPartialTypesWithGlobalInitializedToNone5] +[case testLocalPartialTypesWithGlobalInitializedToNone5_no_parallel] # flags: --local-partial-types a = None @@ -2807,6 +2807,30 @@ a = '' reveal_type(a) # N: Revealed type is "builtins.str" [builtins fixtures/list.pyi] +[case testLocalPartialTypesWithGlobalInitializedToNone4_parallel_only] +# flags: --local-partial-types --no-strict-optional +a = None + +def f() -> None: + reveal_type(a) # N: Revealed type is "builtins.str" + +reveal_type(a) # N: Revealed type is "None" +a = '' +reveal_type(a) # N: Revealed type is "builtins.str" +[builtins fixtures/list.pyi] + +[case testLocalPartialTypesWithGlobalInitializedToNone5_parallel_only] +# flags: --local-partial-types +a = None + +def f() -> None: + reveal_type(a) # N: Revealed type is "builtins.str | None" + 
+reveal_type(a) # N: Revealed type is "None" +a = '' +reveal_type(a) # N: Revealed type is "builtins.str" +[builtins fixtures/list.pyi] + [case testLocalPartialTypesWithClassAttributeInitializedToNone] # flags: --local-partial-types class A: diff --git a/test-data/unit/check-kwargs.test b/test-data/unit/check-kwargs.test index c8653143bd9dd..f11c2b6f4fc40 100644 --- a/test-data/unit/check-kwargs.test +++ b/test-data/unit/check-kwargs.test @@ -469,16 +469,13 @@ A.B(x=1) # E: Unexpected keyword argument "x" for "B" [case testUnexpectedMethodKwargFromOtherModule] import m -m.A(x=1) +m.A(x=1) # E: Unexpected keyword argument "x" for "A" \ + # N: "A" defined in "m" [file m.py] -1+'asdf' +1+'asdf' # E: Unsupported operand types for + ("int" and "str") class A: def __init__(self) -> None: pass -[out] -tmp/m.py:1: error: Unsupported operand types for + ("int" and "str") -main:2: error: Unexpected keyword argument "x" for "A" -main:2: note: "A" defined in "m" [case testMissingNamedArgumentFromOtherModule] import m diff --git a/test-data/unit/check-modules.test b/test-data/unit/check-modules.test index 3f24bc614c61e..74dee17e80047 100644 --- a/test-data/unit/check-modules.test +++ b/test-data/unit/check-modules.test @@ -365,23 +365,20 @@ main:4: error: Too many arguments for "x" [case testRelativeImports] import typing import m.a -m.a.x = m.a.y # Error +m.a.x = m.a.y # E: Incompatible types in assignment (expression has type "B", variable has type "A") [file m/__init__.py] [file m/a.py] import typing from .b import A, B, x, y z = x if int(): - z = y # Error + z = y # E: Incompatible types in assignment (expression has type "B", variable has type "A") [file m/b.py] import typing class A: pass class B: pass x = A() y = B() -[out] -tmp/m/a.py:5: error: Incompatible types in assignment (expression has type "B", variable has type "A") -main:3: error: Incompatible types in assignment (expression has type "B", variable has type "A") [case testRelativeImports2] import typing @@ 
-497,15 +494,11 @@ def f(x: int = ...) -> None: pass [case testEllipsisDefaultParamValueInStub2] import m -def f1(x: int = ...) -> int: return 1 -def f2(x: int = '') -> int: return 1 +def f1(x: int = ...) -> int: return 1 # E: Incompatible default for parameter "x" (default has type "ellipsis", parameter has type "int") +def f2(x: int = '') -> int: return 1 # E: Incompatible default for parameter "x" (default has type "str", parameter has type "int") [file m.pyi] def g1(x: int = ...) -> int: pass -def g2(x: int = '') -> int: pass -[out] -tmp/m.pyi:2: error: Incompatible default for parameter "x" (default has type "str", parameter has type "int") -main:2: error: Incompatible default for parameter "x" (default has type "ellipsis", parameter has type "int") -main:3: error: Incompatible default for parameter "x" (default has type "str", parameter has type "int") +def g2(x: int = '') -> int: pass # E: Incompatible default for parameter "x" (default has type "str", parameter has type "int") [case testEllipsisDefaultParamValueInNonStub] def ok_1(x: int = ...) -> None: pass @@ -523,7 +516,6 @@ def bad_2(x: int = ...) -> None: # E: Incompatible default for parameter "x" def bad_3(x: int = ...) -> None: # E: Incompatible default for parameter "x" (default has type "ellipsis", parameter has type "int") raise Exception("Some other exception") [builtins fixtures/exception.pyi] -[out] [case testEllipsisDefaultParamValueInNonStubsOverload] from typing import overload, Union @@ -544,7 +536,6 @@ def bar(x: str, y: str = ...) -> str: ... def bar(x: Both, y: Both = ...) -> Both: raise NotImplementedError [builtins fixtures/exception.pyi] -[out] [case testEllipsisDefaultParamValueInNonStubsMethods] from typing import Generic, Protocol, TypeVar @@ -564,7 +555,6 @@ class MyAbstractClass: @abstractmethod def default_impl(self, x: Wrap[int] = ...) 
-> int: return 3 # E: Incompatible default for parameter "x" (default has type "ellipsis", parameter has type "Wrap[int]") [builtins fixtures/exception.pyi] -[out] [case testStarImportOverlapping] from m1 import * @@ -2083,29 +2073,19 @@ def __getattr__(name: str) -> str: ... [case testModuleLevelGetattrInvalidSignature] import has_getattr -reveal_type(has_getattr.any_attribute) +reveal_type(has_getattr.any_attribute) # N: Revealed type is "builtins.str" [file has_getattr.pyi] -def __getattr__(x: int, y: str) -> str: ... - -[out] -tmp/has_getattr.pyi:1: error: Invalid signature "Callable[[int, str], str]" for "__getattr__" -main:3: note: Revealed type is "builtins.str" - +def __getattr__(x: int, y: str) -> str: ... # E: Invalid signature "Callable[[int, str], str]" for "__getattr__" [builtins fixtures/module.pyi] [case testModuleLevelGetattrNotCallable] import has_getattr -reveal_type(has_getattr.any_attribute) +reveal_type(has_getattr.any_attribute) # N: Revealed type is "Any" [file has_getattr.pyi] -__getattr__ = 3 - -[out] -tmp/has_getattr.pyi:1: error: Invalid signature "int" for "__getattr__" -main:3: note: Revealed type is "Any" - +__getattr__ = 3 # E: Invalid signature "int" for "__getattr__" [builtins fixtures/module.pyi] [case testModuleLevelGetattrUntyped] @@ -2195,17 +2175,13 @@ __getattr__ = make_getattr_good() # OK [case testModuleLevelGetattrAssignedBad] import non_stub -reveal_type(non_stub.name) +reveal_type(non_stub.name) # N: Revealed type is "builtins.int" [file non_stub.py] from typing import Callable def make_getattr_bad() -> Callable[[], int]: ... -__getattr__ = make_getattr_bad() - -[out] -tmp/non_stub.py:4: error: Invalid signature "Callable[[], int]" for "__getattr__" -main:2: note: Revealed type is "builtins.int" +__getattr__ = make_getattr_bad() # E: Invalid signature "Callable[[], int]" for "__getattr__" [case testModuleLevelGetattrImportedGood] import non_stub @@ -2219,18 +2195,13 @@ def __getattr__(name: str) -> int: ... 
[case testModuleLevelGetattrImportedBad] import non_stub -reveal_type(non_stub.name) +reveal_type(non_stub.name) # N: Revealed type is "builtins.int" [file non_stub.py] from has_getattr import __getattr__ [file has_getattr.py] -def __getattr__() -> int: ... - -[out] -tmp/has_getattr.py:1: error: Invalid signature "Callable[[], int]" for "__getattr__" -main:2: note: Revealed type is "builtins.int" - +def __getattr__() -> int: ... # E: Invalid signature "Callable[[], int]" for "__getattr__" [builtins fixtures/module.pyi] -- Parallel mode gives only_once notes once *per worker* diff --git a/test-data/unit/check-narrowing.test b/test-data/unit/check-narrowing.test index 93a7207288761..d8176200efdb6 100644 --- a/test-data/unit/check-narrowing.test +++ b/test-data/unit/check-narrowing.test @@ -2685,7 +2685,8 @@ def fn_while(arg: T) -> None: return None [builtins fixtures/primitives.pyi] -[case testRefinePartialTypeWithinLoop] +-- Parallel mode does not support --no-local-partial-types +[case testRefinePartialTypeWithinLoop_no_parallel] # flags: --no-local-partial-types --strict-equality --warn-unreachable x = None diff --git a/test-data/unit/check-newsemanal.test b/test-data/unit/check-newsemanal.test index ad3ea03110c0b..4148d04014a81 100644 --- a/test-data/unit/check-newsemanal.test +++ b/test-data/unit/check-newsemanal.test @@ -231,14 +231,14 @@ import a class T1(TypedDict): x: A class A: pass -reveal_type(T1(x=A())) # E +reveal_type(T1(x=A())) # N: Revealed type is "TypedDict('__main__.T1', {'x': __main__.A})" [file a.py] from typing import TypedDict from b import TD1 as TD2, TD3 class T2(TD3): x: int -reveal_type(T2(x=2)) # E +reveal_type(T2(x=2)) # N: Revealed type is "TypedDict('a.T2', {'x': builtins.int})" [file b.py] from a import TypedDict as TD1 @@ -246,11 +246,6 @@ from a import TD2 as TD3 [builtins fixtures/dict.pyi] [typing fixtures/typing-typeddict.pyi] -[out] -tmp/a.py:5: note: Revealed type is "TypedDict('a.T2', {'x': builtins.int})" -main:6: note: 
Revealed type is "TypedDict('__main__.T1', {'x': __main__.A})" - - [case testNewAnalyzerTypedDictClassInheritance] from typing import TypedDict @@ -2783,13 +2778,13 @@ t = typing.typevar('t') # E: Module has no attribute "typevar" [case testNewAnalyzerImportFromTopLevelFunction] import a.b # This works at runtime -reveal_type(a.b) # N +reveal_type(a.b) # N: Revealed type is "def ()" [file a/__init__.py] from .b import B from . import b as c def b() -> None: pass -reveal_type(b) # N -reveal_type(c.B()) # N +reveal_type(b) # N: Revealed type is "def ()" +reveal_type(c.B()) # N: Revealed type is "a.b.B" x: Forward class Forward: ... @@ -2798,21 +2793,16 @@ class Forward: class B: ... [builtins fixtures/module.pyi] -[out] -tmp/a/__init__.py:4: note: Revealed type is "def ()" -tmp/a/__init__.py:5: note: Revealed type is "a.b.B" -main:2: note: Revealed type is "def ()" - [case testNewAnalyzerImportFromTopLevelAlias] import a.b # This works at runtime -reveal_type(a.b) # N +reveal_type(a.b) # N: Revealed type is "def () -> builtins.int" [file a/__init__.py] from .b import B from . import b as c b = int y: b -reveal_type(y) # N -reveal_type(c.B) # N +reveal_type(y) # N: Revealed type is "builtins.int" +reveal_type(c.B) # N: Revealed type is "def () -> a.b.B" x: Forward class Forward: ... @@ -2821,21 +2811,16 @@ class Forward: class B: ... 
[builtins fixtures/module.pyi] -[out] -tmp/a/__init__.py:5: note: Revealed type is "builtins.int" -tmp/a/__init__.py:6: note: Revealed type is "def () -> a.b.B" -main:2: note: Revealed type is "def () -> builtins.int" - [case testNewAnalyzerImportAmbiguousWithTopLevelFunction] import a.b # This works at runtime -x: a.b.B # E -reveal_type(a.b) # N +x: a.b.B # E: Name "a.b.B" is not defined +reveal_type(a.b) # N: Revealed type is "def ()" [file a/__init__.py] import a.b import a.b as c def b() -> None: pass -reveal_type(b) # N -reveal_type(c.B()) # N +reveal_type(b) # N: Revealed type is "def ()" +reveal_type(c.B()) # N: Revealed type is "a.b.B" x: Forward class Forward: ... @@ -2844,12 +2829,6 @@ class Forward: class B: ... [builtins fixtures/module.pyi] -[out] -tmp/a/__init__.py:4: note: Revealed type is "def ()" -tmp/a/__init__.py:5: note: Revealed type is "a.b.B" -main:2: error: Name "a.b.B" is not defined -main:3: note: Revealed type is "def ()" - [case testNewAnalyzerConfusingImportConflictingNames] # flags: --follow-imports=skip --ignore-missing-imports # cmd: mypy -m other a.b a diff --git a/test-data/unit/check-overloading.test b/test-data/unit/check-overloading.test index 8616a1b6d165f..00924cb3f8889 100644 --- a/test-data/unit/check-overloading.test +++ b/test-data/unit/check-overloading.test @@ -5280,33 +5280,24 @@ def lol(x: int, y: int) -> int: [case testVeryBrokenOverload] import lib -reveal_type(lib.func) +reveal_type(lib.func) # N: Revealed type is "Any" [file lib.pyi] -@overload +@overload # E: Name "overload" is not defined def func(x: int) -> int: ... 
-def func(x): +def func(x): # E: Name "func" already defined on line 1 return x -[out] -tmp/lib.pyi:1: error: Name "overload" is not defined -tmp/lib.pyi:4: error: Name "func" already defined on line 1 -main:2: note: Revealed type is "Any" -- Order of errors is different [case testVeryBrokenOverload2] import lib -reveal_type(lib.func) +reveal_type(lib.func) # N: Revealed type is "Any" [file lib.pyi] -@overload +@overload # E: Name "overload" is not defined def func(x: int) -> int: ... -@overload +@overload # E: Name "func" already defined on line 1 # E: Name "overload" is not defined def func(x: str) -> str: ... -[out] -tmp/lib.pyi:1: error: Name "overload" is not defined -tmp/lib.pyi:3: error: Name "func" already defined on line 1 -tmp/lib.pyi:3: error: Name "overload" is not defined -main:3: note: Revealed type is "Any" [case testLiteralSubtypeOverlap] from typing import Literal, overload diff --git a/test-data/unit/check-protocols.test b/test-data/unit/check-protocols.test index 511461894db73..87fd30e437170 100644 --- a/test-data/unit/check-protocols.test +++ b/test-data/unit/check-protocols.test @@ -4558,7 +4558,30 @@ def bad() -> Proto: # N: Expected: \ # N: def f(self) -> int \ # N: Got: \ - # N: def f() -> str \ + # N: def f(self) -> str \ + +class Impl: + @defer + def f(self) -> int: ... + +[case testProtocolCheckDefersNode2] +from typing import Any, Callable, Protocol, TypeVar + +class Proto(Protocol): + def f(self) -> int: + ... + +T = TypeVar("T") +def defer(f: Callable[[T], int]) -> Callable[[T], list[int]]: + ... 
+ +def bad() -> Proto: + return Impl() # E: Incompatible return value type (got "Impl", expected "Proto") \ + # N: Following member(s) of "Impl" have conflicts: \ + # N: Expected: \ + # N: def f(self) -> int \ + # N: Got: \ + # N: def f(self) -> list[int] \ class Impl: @defer diff --git a/test-data/unit/check-python312.test b/test-data/unit/check-python312.test index 122bf5df14e7c..2e1f4f863464e 100644 --- a/test-data/unit/check-python312.test +++ b/test-data/unit/check-python312.test @@ -280,6 +280,9 @@ class Covariant[T]: class Invariant[T]: def f(self) -> None: + # We add this to force processing this method as part of the interface in parallel checking, + # otherwise, it will be processed at a later stage, not testing the no ready behavior below + self.y = int() c = Invariant(1) # We need to know that T is invariant here, and for this we need the type # of self.x, which won't be available on the first type checking pass, @@ -306,6 +309,7 @@ if int(): [case testPEP695InferVarianceNotReadyForJoin] class Invariant[T]: def f(self) -> None: + self.y = int() # Assume covariance if variance us not ready reveal_type([Invariant(1), Invariant(object())]) \ # N: Revealed type is "builtins.list[__main__.Invariant[builtins.object]]" @@ -326,6 +330,7 @@ def a2(x: Invariant[object]) -> None: pass class Invariant[T]: def f(self) -> None: + self.y = int() reveal_type(c(a1, a2)) # N: Revealed type is "__main__.Invariant[builtins.int]" def __init__(self, x: T) -> None: diff --git a/test-data/unit/check-redefine.test b/test-data/unit/check-redefine.test index 98c503bf4a912..c5636c28caf0a 100644 --- a/test-data/unit/check-redefine.test +++ b/test-data/unit/check-redefine.test @@ -412,39 +412,29 @@ def f() -> A: return A() [case testRedefineGlobalWithDifferentType] # flags: --allow-redefinition import m -reveal_type(m.x) +reveal_type(m.x) # N: Revealed type is "builtins.str" [file m.py] x = 0 -reveal_type(x) +reveal_type(x) # N: Revealed type is "builtins.int" x = object() 
-reveal_type(x) +reveal_type(x) # N: Revealed type is "builtins.object" x = '' -reveal_type(x) -[out] -tmp/m.py:2: note: Revealed type is "builtins.int" -tmp/m.py:4: note: Revealed type is "builtins.object" -tmp/m.py:6: note: Revealed type is "builtins.str" -main:3: note: Revealed type is "builtins.str" +reveal_type(x) # N: Revealed type is "builtins.str" [case testRedefineGlobalForIndex] # flags: --allow-redefinition import m -reveal_type(m.x) +reveal_type(m.x) # N: Revealed type is "builtins.str" [file m.py] from typing import Iterable def f(): pass it1: Iterable[int] = f() it2: Iterable[str] = f() for x in it1: - reveal_type(x) + reveal_type(x) # N: Revealed type is "builtins.int" for x in it2: - reveal_type(x) -reveal_type(x) -[out] -tmp/m.py:6: note: Revealed type is "builtins.int" -tmp/m.py:8: note: Revealed type is "builtins.str" -tmp/m.py:9: note: Revealed type is "builtins.str" -main:3: note: Revealed type is "builtins.str" + reveal_type(x) # N: Revealed type is "builtins.str" +reveal_type(x) # N: Revealed type is "builtins.str" [case testRedefineGlobalBasedOnPreviousValues] # flags: --allow-redefinition