-rw-r--r--  SConstruct                                          |   29
-rw-r--r--  site_scons/libdeps.py                               |  881
-rw-r--r--  site_scons/libdeps_next.py                          | 1430
-rw-r--r--  site_scons/site_tools/icecream.py                   |    8
-rw-r--r--  site_scons/site_tools/mongo_test_execution.py       |  117
-rw-r--r--  site_scons/site_tools/next/ccache.py                |  172
-rw-r--r--  site_scons/site_tools/next/icecream.py              |  585
-rw-r--r--  site_scons/site_tools/next/mongo_test_execution.py  |  169
-rw-r--r--  site_scons/site_tools/next/ninja.py                 | 1684
-rw-r--r--  site_scons/site_tools/ninja.py                      |   72
-rw-r--r--  src/SConscript                                      |   75
-rw-r--r--  src/mongo/embedded/mongo_embedded/SConscript        |   16
-rw-r--r--  src/mongo/embedded/stitch_support/SConscript        |   16
-rw-r--r--  src/shim_crt.cpp                                    |   31
-rw-r--r--  src/shim_cxx.cpp                                    |   31
15 files changed, 875 insertions(+), 4441 deletions(-)
diff --git a/SConstruct b/SConstruct index f5b908fbc62..fb2198b170a 100644 --- a/SConstruct +++ b/SConstruct @@ -817,7 +817,7 @@ env_vars.Add('CXXFLAGS', converter=variable_shlex_converter) default_destdir = '$BUILD_ROOT/install' -if get_option('ninja') != 'disabled' and get_option('build-tools') == 'next': +if get_option('ninja') != 'disabled': # Workaround for SERVER-53952, where issues arise with different # ninja files building to the same install dir. Different # ninja files need to build to different install dirs. @@ -1865,21 +1865,13 @@ if env['_LIBDEPS'] == '$_LIBDEPS_OBJS': # command but instead runs a function. env["BUILDERS"]["StaticLibrary"].action = SCons.Action.Action(write_uuid_to_file, "Generating placeholder library $TARGET") -if get_option('build-tools') == 'next': - import libdeps_next as libdeps +import libdeps - libdeps.setup_environment( - env, - emitting_shared=(link_model.startswith("dynamic")), - debug=get_option('libdeps-debug'), - linting=get_option('libdeps-linting')) -else: - import libdeps - - libdeps.setup_environment( - env, - emitting_shared=(link_model.startswith("dynamic")), - linting=get_option('libdeps-linting')) +libdeps.setup_environment( + env, + emitting_shared=(link_model.startswith("dynamic")), + debug=get_option('libdeps-debug'), + linting=get_option('libdeps-linting')) # Both the abidw tool and the thin archive tool must be loaded after # libdeps, so that the scanners they inject can see the library @@ -4523,7 +4515,7 @@ if get_option('ninja') != 'disabled': env['NINJA_REGENERATE_DEPS'] = ninja_generate_deps - if get_option('build-tools') == 'next' and env.TargetOSIs("windows"): + if env.TargetOSIs("windows"): # This is a workaround on windows for SERVER-48691 where the line length # in response files is too long: # https://developercommunity.visualstudio.com/content/problem/441978/fatal-error-lnk1170-line-in-command-file-contains.html @@ -5300,6 +5292,5 @@ for i, s in enumerate(BUILD_TARGETS): # Do any final checks the Libdeps linter may need to do once all # SConscripts have been read but before building begins. -if get_option('build-tools') == 'next': - libdeps.LibdepLinter(env).final_checks() - libdeps.generate_libdeps_graph(env) +libdeps.LibdepLinter(env).final_checks() +libdeps.generate_libdeps_graph(env) diff --git a/site_scons/libdeps.py b/site_scons/libdeps.py index 8dd859222e3..ddf1bc93631 100644 --- a/site_scons/libdeps.py +++ b/site_scons/libdeps.py @@ -51,20 +51,39 @@ automatically added when missing. # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
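[Editor's note: for orientation, the SConstruct hunk above collapses the old build-tools=next branch into a single call. A minimal sketch of the resulting invocation, assuming the SConstruct's own `env`, `link_model`, and `get_option` names are in scope:]

    import libdeps

    libdeps.setup_environment(
        env,
        emitting_shared=link_model.startswith("dynamic"),  # dynamic link models use the shared emitters
        debug=get_option('libdeps-debug'),                 # e.g. 'on' or 'off'
        linting=get_option('libdeps-linting'))             # 'off' skips LibdepLinter entirely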
-from collections import OrderedDict +from collections import defaultdict +from functools import partial +import enum import copy +import json import os +import sys +import glob import textwrap +import hashlib +import json +import fileinput + +try: + import networkx + from buildscripts.libdeps.libdeps.graph import EdgeProps, NodeProps, LibdepsGraph +except ImportError: + pass import SCons.Errors import SCons.Scanner import SCons.Util +import SCons +from SCons.Script import COMMAND_LINE_TARGETS + class Constants: Libdeps = "LIBDEPS" LibdepsCached = "LIBDEPS_cached" LibdepsDependents = "LIBDEPS_DEPENDENTS" + LibdepsGlobal = "LIBDEPS_GLOBAL" + LibdepsNoInherit = "LIBDEPS_NO_INHERIT" LibdepsInterface ="LIBDEPS_INTERFACE" LibdepsPrivate = "LIBDEPS_PRIVATE" LibdepsTags = "LIBDEPS_TAGS" @@ -75,9 +94,26 @@ class Constants: SysLibdepsCached = "SYSLIBDEPS_cached" SysLibdepsPrivate = "SYSLIBDEPS_PRIVATE" -class dependency: - Public, Private, Interface = list(range(3)) +class deptype(tuple, enum.Enum): + Global: tuple = (0, 'GLOBAL') + Public: tuple = (1, 'PUBLIC') + Private: tuple = (2, 'PRIVATE') + Interface: tuple = (3, 'INTERFACE') + + def __lt__(self, other): + if self.__class__ is other.__class__: + return self.value[0] < other.value[0] + return NotImplemented + + def __str__(self): + return self.value[1] + + def __int__(self): + return self.value[0] + + +class dependency: def __init__(self, value, deptype, listed_name): self.target_node = value self.dependency_type = deptype @@ -86,6 +122,7 @@ class dependency: def __str__(self): return str(self.target_node) + class FlaggedLibdep: """ Utility class used for processing prefix and postfix flags on libdeps. The class @@ -198,6 +235,8 @@ class LibdepLinter: linting_rules_run = 0 registered_linting_time = False + dangling_dep_dependents = set() + @staticmethod def _make_linter_decorator(): """ @@ -212,9 +251,30 @@ class LibdepLinter: linter_rule_func.all = funcs return linter_rule_func + linter_rule = _make_linter_decorator.__func__() + linter_final_check = _make_linter_decorator.__func__() + + @classmethod + def _skip_linting(cls): + return cls.skip_linting + + @classmethod + def _start_timer(cls): + # Record time spent linting if we are in print mode. + if cls.print_linter_errors: + from timeit import default_timer as timer + return timer() + + @classmethod + def _stop_timer(cls, start, num_rules): + # Record time spent linting if we are in print mode. + if cls.print_linter_errors: + from timeit import default_timer as timer + cls.linting_time += timer() - start + cls.linting_rules_run += num_rules - def __init__(self, env, target): + def __init__(self, env, target=None): self.env = env self.target = target self.unique_libs = set() @@ -239,13 +299,9 @@ class LibdepLinter: # Build performance optimization if you # are sure your build is clean. - if self.__class__.skip_linting: + if self._skip_linting(): return - - # Record time spent linting if we are in print mode. - if self.__class__.print_linter_errors: - from timeit import default_timer as timer - start = timer() + start = self._start_timer() linter_rules = [ getattr(self, linter_rule) @@ -256,9 +312,24 @@ class LibdepLinter: for linter_rule in linter_rules: linter_rule(libdep) - if self.__class__.print_linter_errors: - self.__class__.linting_time += timer() - start - self.__class__.linting_rules_run += (len(linter_rules)*len(libdeps)) + self._stop_timer(start, len(linter_rules)*len(libdeps)) + + def final_checks(self): + # Build performance optimization if you + # are sure your build is clean. 
+ if self._skip_linting(): return + start = self._start_timer() + + linter_rules = [ + getattr(self.__class__, rule) + for rule in self.__class__.linter_final_check.all + ] + + for linter_rule in linter_rules: + linter_rule(self) + + self._stop_timer(start, len(linter_rules)) def _raise_libdep_lint_exception(self, message): """ @@ -296,10 +367,17 @@ def _get_deps_dependents(self, env=None): """ util function to get all types of DEPS_DEPENDENTS""" target_env = env if env else self.env - deps_dependents = target_env.get(Constants.LibdepsDependents, []) + deps_dependents = target_env.get(Constants.LibdepsDependents, []).copy() deps_dependents += target_env.get(Constants.ProgdepsDependents, []) return deps_dependents + def _get_deps_dependents_with_types(self, builder, type): + return [ + (dependent[0], builder) if isinstance(dependent, tuple) else + (dependent, builder) + for dependent in self.env.get(type, []) + ] + @linter_rule def linter_rule_leaf_node_no_deps(self, libdep): """ LIBDEP RULE: Nodes marked explicitly as a leaf node should not have any dependencies, unless those dependencies are explicitly marked as allowed as leaf node dependencies. """ if not self._check_for_lint_tags('lint-leaf-node-no-deps', inclusive_tag=True): return # Ignore dependencies that explicitly exempt themselves. if self._check_for_lint_tags('lint-leaf-node-allowed-dep', libdep.target_node.env): return + # Global dependencies will apply to leaf nodes, so they should + # be automatically exempted. + if libdep.dependency_type == deptype.Global: + return + target_type = self.target[0].builder.get_name(self.env) lib = os.path.basename(str(libdep)) self._raise_libdep_lint_exception( textwrap.dedent(f"""\ {target_type} '{self.target[0]}' has dependency '{lib}' and is marked explicitly as a leaf node, and '{lib}' does not exempt itself as an exception to the rule.""" )) @linter_rule + def linter_rule_no_dangling_deps(self, libdep): + """ + LIBDEP RULE: + All reverse dependency edges must point to a node which will be built. + """ + if self._check_for_lint_tags('lint-allow-dangling-dep-dependent'): + return + + # Gather the DEPS_DEPENDENTS and store them for a final check to make sure they were + # eventually defined as being built by some builder + libdep_libbuilder = self.target[0].builder.get_name(self.env) + deps_depends = self._get_deps_dependents_with_types(libdep_libbuilder, Constants.LibdepsDependents) + deps_depends += self._get_deps_dependents_with_types("Program", Constants.ProgdepsDependents) + self.__class__.dangling_dep_dependents.update(deps_depends) + + @linter_final_check + def linter_rule_no_dangling_dep_final_check(self): + # At this point the SConscripts have defined all the build items, + # and so we can go check any DEPS_DEPENDENTS listed and make sure a builder + # was instantiated to build them. + for dep_dependent in self.__class__.dangling_dep_dependents: + dep_node = _get_node_with_ixes(self.env, dep_dependent[0], dep_dependent[1]) + if not dep_node.has_builder(): + self._raise_libdep_lint_exception( + textwrap.dedent(f"""\ + Found reverse dependency linked to node '{dep_node}' + which will never be built by any builder. + Remove the reverse dependency or add a way to build it.""" + )) + + @linter_rule def linter_rule_no_public_deps(self, libdep): """ LIBDEP RULE: @@ -333,7 +447,7 @@ if not self._check_for_lint_tags('lint-no-public-deps', inclusive_tag=True): return - if libdep.dependency_type != dependency.Private: + if libdep.dependency_type not in (deptype.Global, deptype.Private): # Check if the libdep exempts itself from this rule.
if self._check_for_lint_tags('lint-public-dep-allowed', libdep.target_node.env): return @@ -399,7 +513,7 @@ return if (self.target[0].builder.get_name(self.env) == "Program" - and libdep.dependency_type != dependency.Public): + and libdep.dependency_type not in (deptype.Global, deptype.Public)): lib = os.path.basename(str(libdep)) self._raise_libdep_lint_exception( @@ -445,7 +559,7 @@ if self._check_for_lint_tags('lint-allow-nonprivate-on-deps-dependents'): return - if (libdep.dependency_type != dependency.Private + if (libdep.dependency_type != deptype.Private and libdep.dependency_type != deptype.Global and len(self._get_deps_dependents()) > 0): target_type = self.target[0].builder.get_name(self.env) @@ -476,26 +590,29 @@ target_type = self.target[0].builder.get_name(self.env) self._raise_libdep_lint_exception(textwrap.dedent(f"""\ - Found non-list type '{libdeps_list}' while evaluating {dep_type_val} for {target_type} '{self.target[0]}' - {dep_type_val} must be setup as a list.""" + Found non-list type '{libdeps_list}' while evaluating {dep_type_val[1]} for {target_type} '{self.target[0]}' + {dep_type_val[1]} must be set up as a list.""" )) dependency_visibility_ignored = { - dependency.Public: dependency.Public, - dependency.Private: dependency.Public, - dependency.Interface: dependency.Public, + deptype.Global: deptype.Public, + deptype.Interface: deptype.Public, + deptype.Public: deptype.Public, + deptype.Private: deptype.Public, } dependency_visibility_honored = { - dependency.Public: dependency.Public, - dependency.Private: dependency.Private, - dependency.Interface: dependency.Interface, + deptype.Global: deptype.Private, + deptype.Interface: deptype.Interface, + deptype.Public: deptype.Public, + deptype.Private: deptype.Private, } dep_type_to_env_var = { - dependency.Public: Constants.Libdeps, - dependency.Private: Constants.LibdepsPrivate, - dependency.Interface: Constants.LibdepsInterface, + deptype.Global: Constants.LibdepsGlobal, + deptype.Interface: Constants.LibdepsInterface, + deptype.Public: Constants.Libdeps, + deptype.Private: Constants.LibdepsPrivate, } class DependencyCycleError(SCons.Errors.UserError): @@ -516,17 +633,23 @@ class LibdepLinterError(SCons.Errors.UserError): class MissingSyslibdepError(SCons.Errors.UserError): """Exception representing an incongruent usage of libdeps""" -def __get_sorted_direct_libdeps(node): - direct_sorted = getattr(node.attributes, "libdeps_direct_sorted", False) - if not direct_sorted: +def _get_sorted_direct_libdeps(node): + direct_sorted = getattr(node.attributes, "libdeps_direct_sorted", None) + if direct_sorted is None: direct = getattr(node.attributes, "libdeps_direct", []) direct_sorted = sorted(direct, key=lambda t: str(t.target_node)) setattr(node.attributes, "libdeps_direct_sorted", direct_sorted) return direct_sorted -def __libdeps_visit(n, marked, tsorted, walking): - if n.target_node in marked: +class LibdepsVisitationMark(enum.IntEnum): + UNMARKED = 0 + MARKED_PRIVATE = 1 + MARKED_PUBLIC = 2 + + +def _libdeps_visit_private(n, marked, walking, debug=False): + if marked[n.target_node] >= LibdepsVisitationMark.MARKED_PRIVATE: return if n.target_node in walking: @@ -535,11 +658,78 @@ walking.add(n.target_node) try: - for child in
_get_sorted_direct_libdeps(n.target_node): + _libdeps_visit_private(child, marked, walking) - marked.add(n.target_node) + marked[n.target_node] = LibdepsVisitationMark.MARKED_PRIVATE + + except DependencyCycleError as e: + if len(e.cycle_nodes) == 1 or e.cycle_nodes[0] != e.cycle_nodes[-1]: + e.cycle_nodes.insert(0, n.target_node) + raise + + finally: + walking.remove(n.target_node) + + +def _libdeps_visit(n, tsorted, marked, walking, debug=False): + # The marked dictionary tracks which sorts of visitation a node + # has received. Values for a given node can be UNMARKED/absent, + # MARKED_PRIVATE, or MARKED_PUBLIC. These are to be interpreted as + # follows: + # + # 0/UNMARKED: Node is not marked. + # + # MARKED_PRIVATE: Node has only been explored as part of looking + # for cycles under a LIBDEPS_PRIVATE edge. + # + # MARKED_PUBLIC: Node has been explored and any of its transitive + # dependencies have been incorporated into `tsorted`. + # + # The _libdeps_visit_private function above will only mark things + # with MARKED_PRIVATE, while _libdeps_visit will mark things + # MARKED_PUBLIC. + if marked[n.target_node] == LibdepsVisitationMark.MARKED_PUBLIC: + return + + # The walking set is used for cycle detection. We record all our + # predecessors in our depth-first search, and if we observe one of + # our predecessors as a child, we know we have a cycle. + if n.target_node in walking: + raise DependencyCycleError(n.target_node) + + walking.add(n.target_node) + + if debug: + print(f" * {n.dependency_type} => {n.listed_name}") + + try: + children = _get_sorted_direct_libdeps(n.target_node) + + # We first walk all of our public dependencies so that we can + # put full marks on anything that is in our public transitive + # graph. We then do a second walk into any private nodes to + # look for cycles. While we could do just one walk over the + # children, it is slightly faster to do two passes, since if + # the algorithm walks into a private edge early, it would do a + # lot of non-productive (except for cycle checking) walking + # and marking, but if another public path gets into that same + # subtree, then it must walk and mark it again to raise it to + # the public mark level. Whereas, if the algorithm first walks + # the whole public tree, then those are all productive marks + # that add to tsorted, and then the private walk will only need + # to examine those things that are only reachable via private + # edges. + + for child in children: + if child.dependency_type != deptype.Private: + _libdeps_visit(child, tsorted, marked, walking, debug) + + for child in children: + if child.dependency_type == deptype.Private: + _libdeps_visit_private(child, marked, walking, debug) + + marked[n.target_node] = LibdepsVisitationMark.MARKED_PUBLIC tsorted.append(n.target_node) except DependencyCycleError as e: @@ -547,8 +737,11 @@ e.cycle_nodes.insert(0, n.target_node) raise + finally: + walking.remove(n.target_node) + -def __get_libdeps(node): +def _get_libdeps(node, debug=False): """Given a SCons Node, return its library dependencies, topologically sorted. Computes the dependencies if they're not already cached.
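[Editor's note: to make the two-pass walk described in the comments above concrete, here is a small self-contained sketch of the same idea. It uses a plain dict in place of SCons nodes; the node names and edges are illustrative only:]

    from collections import defaultdict
    from enum import IntEnum

    class Mark(IntEnum):
        UNMARKED = 0
        PRIVATE = 1   # explored only for cycle checking under a private edge
        PUBLIC = 2    # fully explored; contributes to the link line

    # Toy graph: name -> [(child, is_private), ...]. 'tc' privately uses 'secret'.
    graph = {
        "tc": [("tb", False), ("secret", True)],
        "tb": [("ta", False)],
        "ta": [],
        "secret": [("ta", False)],
    }

    def visit_private(name, marked, walking):
        if marked[name] >= Mark.PRIVATE:
            return
        if name in walking:
            raise RuntimeError(f"dependency cycle at {name}")
        walking.add(name)
        try:
            for child, _ in graph[name]:
                visit_private(child, marked, walking)
            marked[name] = Mark.PRIVATE
        finally:
            walking.remove(name)

    def visit(name, tsorted, marked, walking):
        if marked[name] == Mark.PUBLIC:
            return
        if name in walking:
            raise RuntimeError(f"dependency cycle at {name}")
        walking.add(name)
        try:
            children = graph[name]
            for child, private in children:      # pass 1: public edges, full marks
                if not private:
                    visit(child, tsorted, marked, walking)
            for child, private in children:      # pass 2: private edges, cycle check only
                if private:
                    visit_private(child, marked, walking)
            marked[name] = Mark.PUBLIC
            tsorted.append(name)
        finally:
            walking.remove(name)

    tsorted, marked, walking = [], defaultdict(lambda: Mark.UNMARKED), set()
    visit("tc", tsorted, marked, walking)
    tsorted.reverse()
    print(tsorted)  # ['tc', 'tb', 'ta'] -- 'secret' was cycle-checked but not linked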
@@ -556,123 +749,146 @@ def __get_libdeps(node): cache = getattr(node.attributes, Constants.LibdepsCached, None) if cache is not None: + if debug: + print(" Cache:") + for dep in cache: + print(f" * {str(dep)}") return cache + if debug: + print(f" Edges:") + tsorted = [] - marked = set() - walking = set() - for child in __get_sorted_direct_libdeps(node): - if child.dependency_type != dependency.Interface: - __libdeps_visit(child, marked, tsorted, walking) + marked = defaultdict(lambda: LibdepsVisitationMark.UNMARKED) + walking = set() + for child in _get_sorted_direct_libdeps(node): + if child.dependency_type != deptype.Interface: + _libdeps_visit(child, tsorted, marked, walking, debug=debug) tsorted.reverse() - setattr(node.attributes, Constants.LibdepsCached, tsorted) + setattr(node.attributes, Constants.LibdepsCached, tsorted) return tsorted -def __missing_syslib(name): +def _missing_syslib(name): return Constants.MissingLibdep + name -def update_scanner(builder): +def update_scanner(env, builder_name=None, debug=False): """Update the scanner for "builder" to also scan library dependencies.""" + builder = env["BUILDERS"][builder_name] old_scanner = builder.target_scanner if old_scanner: path_function = old_scanner.path_function - - def new_scanner(node, env, path=()): - result = old_scanner.function(node, env, path) - result.extend(__get_libdeps(node)) - return result - else: path_function = None - def new_scanner(node, env, path=()): - return __get_libdeps(node) + def new_scanner(node, env, path=()): + if debug: + print(f"LIBDEPS SCANNER: {str(node)}") + print(f" Declared dependencies:") + print(f" global: {env.get(Constants.LibdepsGlobal, None)}") + print(f" private: {env.get(Constants.LibdepsPrivate, None)}") + print(f" public: {env.get(Constants.Libdeps, None)}") + print(f" interface: {env.get(Constants.LibdepsInterface, None)}") + print(f" no_inherit: {env.get(Constants.LibdepsNoInherit, None)}") + + if old_scanner: + result = old_scanner.function(node, env, path) + else: + result = [] + result.extend(_get_libdeps(node, debug=debug)) + if debug: + print(f" Build dependencies:") + print('\n'.join([' * ' + str(t) for t in result])) + print('\n') + return result builder.target_scanner = SCons.Scanner.Scanner( function=new_scanner, path_function=path_function ) -def get_libdeps(source, target, env, for_signature): +def get_libdeps(source, target, env, for_signature, debug=False): """Implementation of the special _LIBDEPS environment variable. Expands to the library dependencies for a target. """ target = env.Flatten([target]) - return __get_libdeps(target[0]) + return _get_libdeps(target[0], debug=debug) -def get_libdeps_objs(source, target, env, for_signature): +def get_libdeps_objs(source, target, env, for_signature, debug=False): objs = [] - for lib in get_libdeps(source, target, env, for_signature): + for lib in get_libdeps(source, target, env, for_signature, debug=debug): # This relies on Node.sources being order stable build-to-build. objs.extend(lib.sources) return objs -def make_get_syslibdeps_callable(shared): - - def get_syslibdeps(source, target, env, for_signature): - """ Given a SCons Node, return its system library dependencies. - These are the dependencies listed with SYSLIBDEPS, and are linked using -l. 
- """ +def stringify_deps(env, deps): + lib_link_prefix = env.subst("$LIBLINKPREFIX") + lib_link_suffix = env.subst("$LIBLINKSUFFIX") - deps = getattr(target[0].attributes, Constants.SysLibdepsCached, None) - if deps is None: + # Elements of libdeps are either strings (str or unicode), or they're File objects. + # If they're File objects, they can be passed straight through. If they're strings, + # they're believed to represent library short names, that should be prefixed with -l + # or the compiler-specific equivalent. I.e., 'm' becomes '-lm', but 'File("m.a") is passed + # through whole cloth. + return [f"{lib_link_prefix}{d}{lib_link_suffix}" if isinstance(d, str) else d for d in deps] - # Get the sys libdeps for the current node - deps = target[0].get_env().Flatten(target[0].get_env().get(Constants.SysLibdepsPrivate) or []) - deps += target[0].get_env().Flatten(target[0].get_env().get(Constants.SysLibdeps) or []) - for lib in __get_libdeps(target[0]): +def get_syslibdeps(source, target, env, for_signature, debug=False, shared=True): + """ Given a SCons Node, return its system library dependencies. - # For each libdep get its syslibdeps, and then check to see if we can - # add it to the deps list. For static build we will also include private - # syslibdeps to be transitive. For a dynamic build we will only make - # public libdeps transitive. - syslibs = [] - if not shared: - syslibs += lib.get_env().get(Constants.SysLibdepsPrivate) or [] - syslibs += lib.get_env().get(Constants.SysLibdeps) or [] + These are the dependencies listed with SYSLIBDEPS, and are linked using -l. + """ - # Validate the libdeps, a configure check has already checked what - # syslibdeps are available so we can hard fail here if a syslibdep - # is being attempted to be linked with. - for syslib in syslibs: - if not syslib: - continue + deps = getattr(target[0].attributes, Constants.SysLibdepsCached, None) + if deps is None: + + # Get the syslibdeps for the current node + deps = target[0].get_env().Flatten(copy.copy(target[0].get_env().get(Constants.SysLibdepsPrivate)) or []) + deps += target[0].get_env().Flatten(target[0].get_env().get(Constants.SysLibdeps) or []) + + for lib in _get_libdeps(target[0]): + + # For each libdep get its syslibdeps, and then check to see if we can + # add it to the deps list. For static build we will also include private + # syslibdeps to be transitive. For a dynamic build we will only make + # public libdeps transitive. + syslibs = [] + if not shared: + syslibs += lib.get_env().get(Constants.SysLibdepsPrivate) or [] + syslibs += lib.get_env().get(Constants.SysLibdeps) or [] + + # Validate the libdeps, a configure check has already checked what + # syslibdeps are available so we can hard fail here if a syslibdep + # is being attempted to be linked with. 
+ for syslib in syslibs: + if not syslib: + continue - if isinstance(syslib, str) and syslib.startswith(Constants.MissingLibdep): - MissingSyslibdepError(textwrap.dedent(f"""\ + if isinstance(syslib, str) and syslib.startswith(Constants.MissingLibdep): + raise MissingSyslibdepError(textwrap.dedent(f"""\ + LibdepsError: Target '{str(target[0])}' depends on the availability of a system provided library for '{syslib[len(Constants.MissingLibdep):]}', but no suitable library was found during configuration.""" - )) - - deps.append(syslib) + )) - setattr(target[0].attributes, Constants.SysLibdepsCached, deps) + deps.append(syslib) - lib_link_prefix = env.subst("$LIBLINKPREFIX") - lib_link_suffix = env.subst("$LIBLINKSUFFIX") - # Elements of syslibdeps are either strings (str or unicode), or they're File objects. - # If they're File objects, they can be passed straight through. If they're strings, - # they're believed to represent library short names, that should be prefixed with -l - # or the compiler-specific equivalent. I.e., 'm' becomes '-lm', but 'File("m.a") is passed - # through whole cloth. - return [f"{lib_link_prefix}{d}{lib_link_suffix}" if isinstance(d, str) else d for d in deps] + setattr(target[0].attributes, Constants.SysLibdepsCached, deps) + return stringify_deps(env, deps) - return get_syslibdeps -def __append_direct_libdeps(node, prereq_nodes): +def _append_direct_libdeps(node, prereq_nodes): # We do not bother to decorate nodes that are not actual Objects if type(node) == str: return @@ -681,7 +897,7 @@ def __append_direct_libdeps(node, prereq_nodes): node.attributes.libdeps_direct.extend(prereq_nodes) -def __get_flagged_libdeps(source, target, env, for_signature): +def _get_flagged_libdeps(source, target, env, for_signature): for lib in get_libdeps(source, target, env, for_signature): # Make sure lib is a Node so we can get the env to check for flags. libnode = lib @@ -694,7 +910,7 @@ def __get_flagged_libdeps(source, target, env, for_signature): yield cur_lib -def __get_node_with_ixes(env, node, node_builder_type): +def _get_node_with_ixes(env, node, node_builder_type): """ Gets the node passed in node with the correct ixes applied for the given builder type. @@ -710,7 +926,7 @@ def __get_node_with_ixes(env, node, node_builder_type): # to run SCons performance intensive 'subst' each time cache_key = (id(env), node_builder_type) try: - prefix, suffix = __get_node_with_ixes.node_type_ixes[cache_key] + prefix, suffix = _get_node_with_ixes.node_type_ixes[cache_key] except KeyError: prefix = node_builder.get_prefix(env) suffix = node_builder.get_suffix(env) @@ -721,108 +937,185 @@ def __get_node_with_ixes(env, node, node_builder_type): if suffix == ".dll": suffix = ".lib" - __get_node_with_ixes.node_type_ixes[cache_key] = (prefix, suffix) + _get_node_with_ixes.node_type_ixes[cache_key] = (prefix, suffix) node_with_ixes = SCons.Util.adjustixes(node, prefix, suffix) return node_factory(node_with_ixes) -__get_node_with_ixes.node_type_ixes = dict() +_get_node_with_ixes.node_type_ixes = dict() -def make_libdeps_emitter( - dependency_builder, - dependency_map=dependency_visibility_ignored, - ignore_progdeps=False, -): - def libdeps_emitter(target, source, env): - """SCons emitter that takes values from the LIBDEPS environment variable and - converts them to File node objects, binding correct path information into - those File objects. 
+def add_node_from(env, node): - Emitters run on a particular "target" node during the initial execution of - the SConscript file, rather than during the later build phase. When they - run, the "env" environment's working directory information is what you - expect it to be -- that is, the working directory is considered to be the - one that contains the SConscript file. This allows specification of - relative paths to LIBDEPS elements. + env.GetLibdepsGraph().add_nodes_from([( + str(node.abspath), + { + NodeProps.bin_type.name: node.builder.get_name(env), + })]) - This emitter also adds LIBSUFFIX and LIBPREFIX appropriately. +def add_edge_from(env, from_node, to_node, visibility, direct): - NOTE: For purposes of LIBDEPS_DEPENDENTS propagation, only the first member - of the "target" list is made a prerequisite of the elements of LIBDEPS_DEPENDENTS. - """ + env.GetLibdepsGraph().add_edges_from([( + from_node, + to_node, + { + EdgeProps.direct.name: direct, + EdgeProps.visibility.name: int(visibility) + })]) - # Get all the libdeps from the env so we can - # can append them to the current target_node. - libdeps = [] - for dep_type in sorted(dependency_map.keys()): +def add_libdeps_node(env, target, libdeps): - - # Libraries may not be stored as a list in the env, - # so we must convert single library strings to a list. - libs = env.get(dep_type_to_env_var[dep_type]) - if not SCons.Util.is_List(libs): - libs = [libs] + if str(target).endswith(env["SHLIBSUFFIX"]): + node = _get_node_with_ixes(env, str(target.abspath), target.get_builder().get_name(env)) + add_node_from(env, node) + + for libdep in libdeps: + if str(libdep.target_node).endswith(env["SHLIBSUFFIX"]): + add_edge_from( + env, + str(node.abspath), + str(libdep.target_node.abspath), + visibility=libdep.dependency_type, + direct=True) + + +def get_libdeps_nodes(env, target, builder, debug=False, visibility_map=None): + if visibility_map is None: + visibility_map = dependency_visibility_ignored + + if not SCons.Util.is_List(target): + target = [target] + + # Get the current list of nodes not to inherit on each target + no_inherit = set(env.get(Constants.LibdepsNoInherit, [])) + + # Get all the libdeps from the env so we + # can append them to the current target_node. + libdeps = [] + for dep_type in sorted(visibility_map.keys()): + + if dep_type == deptype.Global: + if any("conftest" in str(t) for t in target): + # Ignore global dependencies for conftests + continue + + # Libraries may not be stored as a list in the env, + # so we must convert single library strings to a list. + libs = env.get(dep_type_to_env_var[dep_type], []).copy() + if not SCons.Util.is_List(libs): + libs = [libs] + + for lib in libs: + if not lib: + continue + + lib_with_ixes = _get_node_with_ixes(env, lib, builder) + + if lib in no_inherit: + if debug and not any("conftest" in str(t) for t in target): + print(f" {dep_type[1]} =/> {lib}") + + else: + if debug and not any("conftest" in str(t) for t in target): + print(f" {dep_type[1]} => {lib}") - for lib in libs: - if not lib: - continue - lib_with_ixes = __get_node_with_ixes(env, lib, dependency_builder) libdeps.append(dependency(lib_with_ixes, dep_type, lib)) - # Lint the libdeps to make sure they are following the rules. - # This will skip some or all of the checks depending on the options - # and LIBDEPS_TAGS used.
- if not any("conftest" in str(t) for t in target): - LibdepLinter(env, target).lint_libdeps(libdeps) + return libdeps - # We ignored the dependency_map until now because we needed to use - # original dependency value for linting. Now go back through and - # use the map to convert to the desired dependencies, for example - # all Public in the static linking case. - for libdep in libdeps: - libdep.dependency_type = dependency_map[libdep.dependency_type] +def libdeps_emitter(target, source, env, debug=False, builder=None, visibility_map=None, ignore_progdeps=False): + """SCons emitter that takes values from the LIBDEPS environment variable and + converts them to File node objects, binding correct path information into + those File objects. + + Emitters run on a particular "target" node during the initial execution of + the SConscript file, rather than during the later build phase. When they + run, the "env" environment's working directory information is what you + expect it to be -- that is, the working directory is considered to be the + one that contains the SConscript file. This allows specification of + relative paths to LIBDEPS elements. + + This emitter also adds LIBSUFFIX and LIBPREFIX appropriately. + + NOTE: For purposes of LIBDEPS_DEPENDENTS propagation, only the first member + of the "target" list is made a prerequisite of the elements of LIBDEPS_DEPENDENTS. + """ + + if visibility_map is None: + visibility_map = dependency_visibility_ignored + + if debug and not any("conftest" in str(t) for t in target): + print(f"LIBDEPS EMITTER: {str(target[0])}") + print(f" Declared dependencies:") + print(f" global: {env.get(Constants.LibdepsGlobal, None)}") + print(f" private: {env.get(Constants.LibdepsPrivate, None)}") + print(f" public: {env.get(Constants.Libdeps, None)}") + print(f" interface: {env.get(Constants.LibdepsInterface, None)}") + print(f" no_inherit: {env.get(Constants.LibdepsNoInherit, None)}") + print(f" Edges:") + + libdeps = get_libdeps_nodes(env, target, builder, debug, visibility_map) + + if debug and not any("conftest" in str(t) for t in target): + print(f"\n") + + # Lint the libdeps to make sure they are following the rules. + # This will skip some or all of the checks depending on the options + # and LIBDEPS_TAGS used. + if not any("conftest" in str(t) for t in target): + LibdepLinter(env, target).lint_libdeps(libdeps) + + if env.get('SYMBOLDEPSSUFFIX', None): for t in target: - # target[0] must be a Node and not a string, or else libdeps will fail to - # work properly. - __append_direct_libdeps(t, libdeps) + add_libdeps_node(env, t, libdeps) + + # We ignored the visibility_map until now because we needed to use + # original dependency value for linting. Now go back through and + # use the map to convert to the desired dependencies, for example + # all Public in the static linking case. + for libdep in libdeps: + libdep.dependency_type = visibility_map[libdep.dependency_type] + + for t in target: + # target[0] must be a Node and not a string, or else libdeps will fail to + # work properly. 
+ _append_direct_libdeps(t, libdeps) + + for dependent in env.get(Constants.LibdepsDependents, []): + if dependent is None: + continue + + visibility = deptype.Private + if isinstance(dependent, tuple): + visibility = dependent[1] + dependent = dependent[0] - for dependent in env.get(Constants.LibdepsDependents, []): + dependentNode = _get_node_with_ixes( + env, dependent, builder + ) + _append_direct_libdeps( + dependentNode, [dependency(target[0], visibility_map[visibility], dependent)] + ) + + if not ignore_progdeps: + for dependent in env.get(Constants.ProgdepsDependents, []): if dependent is None: continue - visibility = dependency.Private + visibility = deptype.Public if isinstance(dependent, tuple): + # TODO: Error here? Non-public PROGDEPS_DEPENDENTS probably are meaningless visibility = dependent[1] dependent = dependent[0] - dependentNode = __get_node_with_ixes( - env, dependent, dependency_builder + dependentNode = _get_node_with_ixes( + env, dependent, "Program" ) - __append_direct_libdeps( - dependentNode, [dependency(target[0], dependency_map[visibility], dependent)] + _append_direct_libdeps( + dependentNode, [dependency(target[0], visibility_map[visibility], dependent)] ) - if not ignore_progdeps: - for dependent in env.get(Constants.ProgdepsDependents, []): - if dependent is None: - continue - - visibility = dependency.Public - if isinstance(dependent, tuple): - # TODO: Error here? Non-public PROGDEPS_DEPENDENTS probably are meaningless - visibility = dependent[1] - dependent = dependent[0] - - dependentNode = __get_node_with_ixes( - env, dependent, "Program" - ) - __append_direct_libdeps( - dependentNode, [dependency(target[0], dependency_map[visibility], dependent)] - ) - - return target, source - - return libdeps_emitter + return target, source def expand_libdeps_tags(source, target, env, for_signature): @@ -841,7 +1134,7 @@ def expand_libdeps_with_flags(source, target, env, for_signature): # below a bit cleaner. prev_libdep = None - for flagged_libdep in __get_flagged_libdeps(source, target, env, for_signature): + for flagged_libdep in _get_flagged_libdeps(source, target, env, for_signature): # If there are no flags to process we can move on to the next lib. 
# start_index won't matter in that case because if there are no flags @@ -882,8 +1175,109 @@ return libdeps_with_flags - -def setup_environment(env, emitting_shared=False, linting='on'): +def generate_libdeps_graph(env): + if env.get('SYMBOLDEPSSUFFIX', None): + + find_symbols = env.Dir("$BUILD_DIR").path + "/libdeps/find_symbols" + libdeps_graph = env.GetLibdepsGraph() + + symbol_deps = [] + for symbols_file, target_node in env.get('LIBDEPS_SYMBOL_DEP_FILES', []): + + direct_libdeps = [] + for direct_libdep in _get_sorted_direct_libdeps(target_node): + add_node_from(env, direct_libdep.target_node) + add_edge_from( + env, + str(target_node.abspath), + str(direct_libdep.target_node.abspath), + visibility=int(direct_libdep.dependency_type), + direct=True) + direct_libdeps.append(direct_libdep.target_node.abspath) + + for libdep in _get_libdeps(target_node): + if libdep.abspath not in direct_libdeps: + add_node_from(env, libdep) + add_edge_from( + env, + str(target_node.abspath), + str(libdep.abspath), + visibility=int(deptype.Public), + direct=False) + if env['PLATFORM'] == 'darwin': + sep = ' ' + else: + sep = ':' + ld_path = sep.join([os.path.dirname(str(libdep)) for libdep in _get_libdeps(target_node)]) + symbol_deps.append(env.Command( + target=symbols_file, + source=target_node, + action=SCons.Action.Action( + f'{find_symbols} $SOURCE "{ld_path}" $TARGET', + "Generating $SOURCE symbol dependencies" if not env['VERBOSE'] else ""))) + + def write_graph_hash(env, target, source): + + with open(target[0].path, 'w') as f: + json_str = json.dumps(networkx.readwrite.json_graph.node_link_data(env.GetLibdepsGraph()), sort_keys=True).encode('utf-8') + f.write(hashlib.sha256(json_str).hexdigest()) + + graph_hash = env.Command(target="$BUILD_DIR/libdeps/graph_hash.sha256", + source=symbol_deps, + action=SCons.Action.FunctionAction( + write_graph_hash, + {"cmdstr": None})) + env.Depends(graph_hash, [ + env.File("#SConstruct")] + + glob.glob("**/SConscript", recursive=True) + + [os.path.abspath(__file__), + env.File('$BUILD_DIR/mongo/util/version_constants.h')]) + + graph_node = env.Command( + target=env.get('LIBDEPS_GRAPH_FILE', None), + source=symbol_deps, + action=SCons.Action.FunctionAction( + generate_graph, + {"cmdstr": "Generating libdeps graph"})) + + env.Depends(graph_node, [graph_hash] + env.Glob("#buildscripts/libdeps/libdeps/*")) +def generate_graph(env, target, source): + + libdeps_graph = env.GetLibdepsGraph() + + for symbol_deps_file in source: + with open(str(symbol_deps_file)) as f: + symbols = {} + try: + for symbol, lib in json.load(f).items(): + # ignore symbols from external libraries, + # they will just clutter the graph + if lib.startswith(env.Dir("$BUILD_DIR").path): + if lib not in symbols: + symbols[lib] = [] + symbols[lib].append(symbol) + except json.JSONDecodeError: + env.FatalError(f"Failed processing json file: {str(symbol_deps_file)}") + + for libdep in symbols: + from_node = os.path.abspath(str(symbol_deps_file)[:-len(env['SYMBOLDEPSSUFFIX'])]) + to_node = os.path.abspath(libdep).strip() + libdeps_graph.add_edges_from([( + from_node, + to_node, + {EdgeProps.symbols.name: " ".join(symbols[libdep]) })]) + node = env.File(str(symbol_deps_file)[:-len(env['SYMBOLDEPSSUFFIX'])]) + add_node_from(env, node) + + libdeps_graph_file = f"{env.Dir('$BUILD_DIR').path}/libdeps/libdeps.graphml" + networkx.write_graphml(libdeps_graph, libdeps_graph_file, named_key_ids=True) + with fileinput.FileInput(libdeps_graph_file,
inplace=True) as file: + for line in file: + print(line.replace(str(env.Dir("$BUILD_DIR").abspath + os.sep), ''), end='') + + +def setup_environment(env, emitting_shared=False, debug='off', linting='on'): """Set up the given build environment to do LIBDEPS tracking.""" LibdepLinter.skip_linting = linting == 'off' @@ -895,38 +1289,113 @@ env["_LIBDEPS"] = "$_LIBDEPS_LIBS" env["_LIBDEPS_TAGS"] = expand_libdeps_tags - env["_LIBDEPS_GET_LIBS"] = get_libdeps - env["_LIBDEPS_OBJS"] = get_libdeps_objs - env["_SYSLIBDEPS"] = make_get_syslibdeps_callable(emitting_shared) + env["_LIBDEPS_GET_LIBS"] = partial(get_libdeps, debug=debug) + env["_LIBDEPS_OBJS"] = partial(get_libdeps_objs, debug=debug) + env["_SYSLIBDEPS"] = partial(get_syslibdeps, debug=debug, shared=emitting_shared) env[Constants.Libdeps] = SCons.Util.CLVar() env[Constants.SysLibdeps] = SCons.Util.CLVar() - # We need a way for environments to alter just which libdeps - # emitter they want, without altering the overall program or - # library emitter which may have important effects. The - # substitution rules for emitters are a little strange, so build - # ourselves a little trampoline to use below so we don't have to - # deal with it. - def make_indirect_emitter(variable): - def indirect_emitter(target, source, env): - return env[variable](target, source, env) + # Create the alias for graph generation; the existence of this alias + # on the command line will cause the libdeps-graph generation to be + # configured. + env['LIBDEPS_GRAPH_ALIAS'] = env.Alias( + 'generate-libdeps-graph', + "${BUILD_DIR}/libdeps/libdeps.graphml")[0] + + if str(env['LIBDEPS_GRAPH_ALIAS']) in COMMAND_LINE_TARGETS: + + # Detect if the current system has the tools to perform the generation. + if env.GetOption('ninja') != 'disabled': + env.FatalError("Libdeps graph generation is not supported with ninja builds.") + if not emitting_shared: + env.FatalError("Libdeps graph generation currently only supports dynamic builds.") + + if env['PLATFORM'] == 'darwin': + required_bins = ['awk', 'sed', 'otool', 'nm'] + else: + required_bins = ['awk', 'grep', 'ldd', 'nm'] + for bin in required_bins: + if not env.WhereIs(bin): + env.FatalError(f"'{bin}' not found; Libdeps graph generation requires {bin}.") + + + # The find_symbols binary is a small fast C binary which will extract the missing + # symbols from the target library, and discover what linked libraries supply it. This + # sets up the binary to be built. + find_symbols_env = env.Clone() + find_symbols_env.VariantDir('${BUILD_DIR}/libdeps', 'buildscripts/libdeps', duplicate = 0) + find_symbols_node = find_symbols_env.Program( + target='${BUILD_DIR}/libdeps/find_symbols', + source=['${BUILD_DIR}/libdeps/find_symbols.c'], + CFLAGS=['-O3']) + + # Here we are setting up some functions which will return a single instance of the + # network graph and symbol deps list. We also set up some environment variables + # which are used alongside the functions.
+ symbol_deps = [] + def append_symbol_deps(env, symbol_deps_file): + env.Depends(env['LIBDEPS_GRAPH_FILE'], symbol_deps_file[0]) + symbol_deps.append(symbol_deps_file) + env.AddMethod(append_symbol_deps, "AppendSymbolDeps") + + env['LIBDEPS_SYMBOL_DEP_FILES'] = symbol_deps + env['LIBDEPS_GRAPH_FILE'] = env.File("${BUILD_DIR}/libdeps/libdeps.graphml") + env['LIBDEPS_GRAPH_SCHEMA_VERSION'] = 3 + env["SYMBOLDEPSSUFFIX"] = '.symbol_deps' + + libdeps_graph = LibdepsGraph() + libdeps_graph.graph['invocation'] = " ".join([env['ESCAPE'](str(sys.executable))] + [env['ESCAPE'](arg) for arg in sys.argv]) + libdeps_graph.graph['git_hash'] = env['MONGO_GIT_HASH'] + libdeps_graph.graph['graph_schema_version'] = env['LIBDEPS_GRAPH_SCHEMA_VERSION'] + libdeps_graph.graph['build_dir'] = env.Dir('$BUILD_DIR').path + libdeps_graph.graph['deptypes'] = json.dumps({key: value[0] for key, value in deptype.__members__.items() if isinstance(value, tuple)}) + + def get_libdeps_graph(env): + return libdeps_graph + env.AddMethod(get_libdeps_graph, "GetLibdepsGraph") + + # Now we will set up an emitter, and an additional action for several + # of the builders involved with dynamic builds. + def libdeps_graph_emitter(target, source, env): + if "conftest" not in str(target[0]): + symbol_deps_file = env.File(str(target[0]) + env['SYMBOLDEPSSUFFIX']) + env.Depends(symbol_deps_file, '${BUILD_DIR}/libdeps/find_symbols') + env.AppendSymbolDeps((symbol_deps_file,target[0])) + + return target, source + + for builder_name in ("Program", "SharedLibrary", "LoadableModule"): + builder = env['BUILDERS'][builder_name] + base_emitter = builder.emitter + new_emitter = SCons.Builder.ListEmitter([base_emitter, libdeps_graph_emitter]) + builder.emitter = new_emitter - return indirect_emitter env.Append( - LIBDEPS_LIBEMITTER=make_libdeps_emitter("StaticLibrary"), - LIBEMITTER=make_indirect_emitter("LIBDEPS_LIBEMITTER"), - LIBDEPS_SHAREMITTER=make_libdeps_emitter("SharedArchive", ignore_progdeps=True), - SHAREMITTER=make_indirect_emitter("LIBDEPS_SHAREMITTER"), - LIBDEPS_SHLIBEMITTER=make_libdeps_emitter( - "SharedLibrary", dependency_visibility_honored + LIBDEPS_LIBEMITTER=partial( + libdeps_emitter, + debug=debug, + builder="StaticLibrary"), + LIBEMITTER=lambda target, source, env: env["LIBDEPS_LIBEMITTER"](target, source, env), + LIBDEPS_SHAREMITTER=partial( + libdeps_emitter, + debug=debug, + builder="SharedArchive", ignore_progdeps=True), + SHAREMITTER=lambda target, source, env: env["LIBDEPS_SHAREMITTER"](target, source, env), + LIBDEPS_SHLIBEMITTER=partial( + libdeps_emitter, + debug=debug, + builder="SharedLibrary", + visibility_map=dependency_visibility_honored ), - SHLIBEMITTER=make_indirect_emitter("LIBDEPS_SHLIBEMITTER"), + SHLIBEMITTER=lambda target, source, env: env["LIBDEPS_SHLIBEMITTER"](target, source, env), - LIBDEPS_PROGEMITTER=make_libdeps_emitter( - "SharedLibrary" if emitting_shared else "StaticLibrary" + LIBDEPS_PROGEMITTER=partial( + libdeps_emitter, + debug=debug, + builder="SharedLibrary" if emitting_shared else "StaticLibrary" ), - PROGEMITTER=make_indirect_emitter("LIBDEPS_PROGEMITTER"), + PROGEMITTER=lambda target, source, env: env["LIBDEPS_PROGEMITTER"](target, source, env), ) env["_LIBDEPS_LIBS_WITH_TAGS"] = expand_libdeps_with_flags @@ -940,7 +1409,7 @@ env.Prepend(_LIBFLAGS="$_LIBDEPS_TAGS $_LIBDEPS $_SYSLIBDEPS ") for builder_name in ("Program", "SharedLibrary", "LoadableModule", "SharedArchive"): try: -
update_scanner(env["BUILDERS"][builder_name]) + update_scanner(env, builder_name, debug=debug) except KeyError: pass @@ -955,7 +1424,7 @@ def setup_conftests(conf): if result: context.env[var] = lib return context.Result(result) - context.env[var] = __missing_syslib(name) + context.env[var] = _missing_syslib(name) return context.Result(result) conf.AddTest("FindSysLibDep", FindSysLibDep) diff --git a/site_scons/libdeps_next.py b/site_scons/libdeps_next.py deleted file mode 100644 index ddf1bc93631..00000000000 --- a/site_scons/libdeps_next.py +++ /dev/null @@ -1,1430 +0,0 @@ -"""Extension to SCons providing advanced static library dependency tracking. - -These modifications to a build environment, which can be attached to -StaticLibrary and Program builders via a call to setup_environment(env), -cause the build system to track library dependencies through static libraries, -and to add them to the link command executed when building programs. - -For example, consider a program 'try' that depends on a lib 'tc', which in -turn uses a symbol from a lib 'tb' which in turn uses a library from 'ta'. - -Without this package, the Program declaration for "try" looks like this: - -Program('try', ['try.c', 'path/to/${LIBPREFIX}tc${LIBSUFFIX}', - 'path/to/${LIBPREFIX}tb${LIBSUFFIX}', - 'path/to/${LIBPREFIX}ta${LIBSUFFIX}',]) - -With this library, we can instead write the following - -Program('try', ['try.c'], LIBDEPS=['path/to/tc']) -StaticLibrary('tc', ['c.c'], LIBDEPS=['path/to/tb']) -StaticLibrary('tb', ['b.c'], LIBDEPS=['path/to/ta']) -StaticLibrary('ta', ['a.c']) - -And the build system will figure out that it needs to link libta.a and libtb.a -when building 'try'. - -A StaticLibrary S may also declare programs or libraries, [L1, ...] to be dependent -upon S by setting LIBDEPS_DEPENDENTS=[L1, ...], using the same syntax as is used -for LIBDEPS, except that the libraries and programs will not have LIBPREFIX/LIBSUFFIX -automatically added when missing. -""" - -# Copyright (c) 2010, Corensic Inc., All Rights Reserved. -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY -# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -from collections import defaultdict -from functools import partial -import enum -import copy -import json -import os -import sys -import glob -import textwrap -import hashlib -import json -import fileinput - -try: - import networkx - from buildscripts.libdeps.libdeps.graph import EdgeProps, NodeProps, LibdepsGraph -except ImportError: - pass - -import SCons.Errors -import SCons.Scanner -import SCons.Util -import SCons -from SCons.Script import COMMAND_LINE_TARGETS - - - -class Constants: - Libdeps = "LIBDEPS" - LibdepsCached = "LIBDEPS_cached" - LibdepsDependents = "LIBDEPS_DEPENDENTS" - LibdepsGlobal = "LIBDEPS_GLOBAL" - LibdepsNoInherit = "LIBDEPS_NO_INHERIT" - LibdepsInterface ="LIBDEPS_INTERFACE" - LibdepsPrivate = "LIBDEPS_PRIVATE" - LibdepsTags = "LIBDEPS_TAGS" - LibdepsTagExpansion = "LIBDEPS_TAG_EXPANSIONS" - MissingLibdep = "MISSING_LIBDEP_" - ProgdepsDependents = "PROGDEPS_DEPENDENTS" - SysLibdeps = "SYSLIBDEPS" - SysLibdepsCached = "SYSLIBDEPS_cached" - SysLibdepsPrivate = "SYSLIBDEPS_PRIVATE" - - -class deptype(tuple, enum.Enum): - Global: tuple = (0, 'GLOBAL') - Public: tuple = (1, 'PUBLIC') - Private: tuple = (2, 'PRIVATE') - Interface: tuple = (3, 'INTERFACE') - - def __lt__(self, other): - if self.__class__ is other.__class__: - return self.value[0] < other.value[0] - return NotImplemented - - def __str__(self): - return self.value[1] - - def __int__(self): - return self.value[0] - - -class dependency: - def __init__(self, value, deptype, listed_name): - self.target_node = value - self.dependency_type = deptype - self.listed_name = listed_name - - def __str__(self): - return str(self.target_node) - - -class FlaggedLibdep: - """ - Utility class used for processing prefix and postfix flags on libdeps. The class - can keep track of separate lists for prefix and postfix as well separators, - allowing for modifications to the lists and then re-application of the flags with - modifications to a larger list representing the link line. - """ - - def __init__(self, libnode=None, env=None, start_index=None): - """ - The libnode should be a Libdep SCons node, and the env is the target env in - which the target has a dependency on the libdep. The start_index is important as - it determines where this FlaggedLibdep starts in the larger list of libdeps. - - The start_index will cut the larger list, and then re-apply this libdep with flags - at that location. This class will exract the prefix and postfix flags - from the Libdep nodes env. - """ - self.libnode = libnode - self.env = env - - # We need to maintain our own copy so as not to disrupt the env's original list. - try: - self.prefix_flags = copy.copy(getattr(libnode.attributes, 'libdeps_prefix_flags', [])) - self.postfix_flags = copy.copy(getattr(libnode.attributes, 'libdeps_postfix_flags', [])) - except AttributeError: - self.prefix_flags = [] - self.postfix_flags = [] - - self.start_index = start_index - - def __str__(self): - return str(self.libnode) - - def add_lib_to_result_list(self, result): - """ - This function takes in the current list of libdeps for a given target, and will - apply the libdep taking care of the prefix, postfix and any required separators when - adding to the list. 
- """ - if self.start_index != None: - result[:] = result[:self.start_index] - self._add_lib_and_flags(result) - - def _get_separators(self, flags): - - separated_list = [] - - for flag in flags: - separators = self.env.get('LIBDEPS_FLAG_SEPARATORS', {}).get(flag, {}) - separated_list.append(separators.get('prefix', ' ')) - separated_list.append(flag) - separated_list.append(separators.get('suffix', ' ')) - - return separated_list - - def _get_lib_with_flags(self): - - lib_and_flags = [] - - lib_and_flags += self._get_separators(self.prefix_flags) - lib_and_flags += [str(self)] - lib_and_flags += self._get_separators(self.postfix_flags) - - return lib_and_flags - - def _add_lib_and_flags(self, result): - """ - This function will clean up the flags for the link line after extracting everything - from the environment. This will mostly look for separators that are just a space, and - remove them from the list, as the final link line will add spaces back for each item - in the list. It will take to concat flags where the separators don't allow for a space. - """ - next_contig_str = '' - - for item in self._get_lib_with_flags(): - if item != ' ': - next_contig_str += item - else: - if next_contig_str: - result.append(next_contig_str) - next_contig_str = '' - - if next_contig_str: - result.append(next_contig_str) - - - -class LibdepLinter: - """ - This class stores the rules for linting the libdeps. Using a decorator, - new rules can easily be added to the class, and will be called when - linting occurs. Each rule is run on each libdep. - - When a rule is broken, a LibdepLinterError exception will be raised. - Optionally the class can be configured to print the error message and - keep going with the build. - - Each rule should provide a method to skip that rule on a given node, - by supplying the correct flag in the LIBDEPS_TAG environment var for - that node. - - """ - - skip_linting = False - print_linter_errors = False - - linting_time = 0 - linting_infractions = 0 - linting_rules_run = 0 - registered_linting_time = False - - dangling_dep_dependents = set() - - @staticmethod - def _make_linter_decorator(): - """ - This is used for gathering the functions - by decorator that will be used for linting a given libdep. - """ - - funcs = {} - def linter_rule_func(func): - funcs[func.__name__] = func - return func - - linter_rule_func.all = funcs - return linter_rule_func - - linter_rule = _make_linter_decorator.__func__() - linter_final_check = _make_linter_decorator.__func__() - - @classmethod - def _skip_linting(cls): - return cls.skip_linting - - @classmethod - def _start_timer(cls): - # Record time spent linting if we are in print mode. - if cls.print_linter_errors: - from timeit import default_timer as timer - return timer() - - @classmethod - def _stop_timer(cls, start, num_rules): - # Record time spent linting if we are in print mode. - if cls.print_linter_errors: - from timeit import default_timer as timer - cls.linting_time += timer() - start - cls.linting_rules_run += num_rules - - def __init__(self, env, target=None): - self.env = env - self.target = target - self.unique_libs = set() - self._libdeps_types_previous = dict() - - - # If we are in print mode, we will record some linting metrics, - # and print the results at the end of the build. 
- if self.__class__.print_linter_errors and not self.__class__.registered_linting_time: - import atexit - def print_linting_time(): - print(f"Spent {self.__class__.linting_time} seconds linting libdeps.") - print(f"Found {self.__class__.linting_infractions} issues out of {self.__class__.linting_rules_run} libdeps rules checked.") - atexit.register(print_linting_time) - self.__class__.registered_linting_time = True - - def lint_libdeps(self, libdeps): - """ - Lint the given list of libdeps for all - rules. - """ - - # Build performance optimization if you - # are sure your build is clean. - if self._skip_linting(): - return - start = self._start_timer() - - linter_rules = [ - getattr(self, linter_rule) - for linter_rule in self.linter_rule.all - ] - - for libdep in libdeps: - for linter_rule in linter_rules: - linter_rule(libdep) - - self._stop_timer(start, len(linter_rules)*len(libdeps)) - - def final_checks(self): - # Build performance optimization if you - # are sure your build is clean. - if self._skip_linting(): - return - start = self._start_timer() - - linter_rules = [ - getattr(self.__class__, rule) - for rule in self.__class__.linter_final_check.all - ] - - for linter_rule in linter_rules: - linter_rule(self) - - self._stop_timer(start, len(linter_rules)) - - def _raise_libdep_lint_exception(self, message): - """ - Raises the LibdepLinterError exception or if configure - to do so, just prints the error. - """ - prefix = "LibdepLinter: \n\t" - message = prefix + message.replace('\n', '\n\t') + '\n' - if self.__class__.print_linter_errors: - self.__class__.linting_infractions += 1 - print(message) - else: - raise LibdepLinterError(message) - - def _check_for_lint_tags(self, lint_tag, env=None, inclusive_tag=False): - """ - Used to get the lint tag from the environment, - and if printing instead of raising exceptions, - will ignore the tags. - """ - - # If print mode is on, we want to make sure to bypass checking - # exclusive tags so we can make sure the exceptions are not excluded - # and are printed. If it's an inclusive tag, we want to ignore this - # early return completely, because we want to make sure the node - # gets included for checking, and the exception gets printed. - if not inclusive_tag and self.__class__.print_linter_errors: - return False - - target_env = env if env else self.env - - if lint_tag in target_env.get(Constants.LibdepsTags, []): - return True - - def _get_deps_dependents(self, env=None): - """ util function to get all types of DEPS_DEPENDENTS""" - target_env = env if env else self.env - deps_dependents = target_env.get(Constants.LibdepsDependents, []).copy() - deps_dependents += target_env.get(Constants.ProgdepsDependents, []) - return deps_dependents - - def _get_deps_dependents_with_types(self, builder, type): - return [ - (dependent[0], builder) if isinstance(dependent, tuple) else - (dependent, builder) - for dependent in self.env.get(type, []) - ] - - @linter_rule - def linter_rule_leaf_node_no_deps(self, libdep): - """ - LIBDEP RULE: - Nodes marked explicitly as a leaf node should not have any dependencies, - unless those dependencies are explicitly marked as allowed as leaf node - dependencies. - """ - if not self._check_for_lint_tags('lint-leaf-node-no-deps', inclusive_tag=True): - return - - # Ignore dependencies that explicitly exempt themselves. - if self._check_for_lint_tags('lint-leaf-node-allowed-dep', libdep.target_node.env): - return - - # Global dependencies will apply to leaf nodes, so they should - # be automatically exempted. 
-        if libdep.dependency_type == deptype.Global:
-            return
-
-        target_type = self.target[0].builder.get_name(self.env)
-        lib = os.path.basename(str(libdep))
-        self._raise_libdep_lint_exception(
-            textwrap.dedent(f"""\
-                {target_type} '{self.target[0]}' has dependency '{lib}' and is marked explicitly as a leaf node,
-                and '{lib}' does not exempt itself as an exception to the rule."""
-        ))
-
-    @linter_rule
-    def linter_rule_no_dangling_deps(self, libdep):
-        """
-        LIBDEP RULE:
-        All reverse dependency edges must point to a node which will be built.
-        """
-        if self._check_for_lint_tags('lint-allow-dangling-dep-dependent'):
-            return
-
-        # Gather the DEPS_DEPENDENTS and store them for a final check to make sure they were
-        # eventually defined as being built by some builder
-        libdep_libbuilder = self.target[0].builder.get_name(self.env)
-        deps_depends = self._get_deps_dependents_with_types(libdep_libbuilder, Constants.LibdepsDependents)
-        deps_depends += self._get_deps_dependents_with_types("Program", Constants.ProgdepsDependents)
-        self.__class__.dangling_dep_dependents.update(deps_depends)
-
-    @linter_final_check
-    def linter_rule_no_dangling_dep_final_check(self):
-        # At this point the SConscripts have defined all the build items,
-        # and so we can go check any DEPS_DEPENDENTS listed and make sure a builder
-        # was instantiated to build them.
-        for dep_dependent in self.__class__.dangling_dep_dependents:
-            dep_node = _get_node_with_ixes(self.env, dep_dependent[0], dep_dependent[1])
-            if not dep_node.has_builder():
-                self._raise_libdep_lint_exception(
-                    textwrap.dedent(f"""\
-                        Found reverse dependency linked to node '{dep_node}'
-                        which will never be built by any builder.
-                        Remove the reverse dependency or add a way to build it."""
-                ))
-
-    @linter_rule
-    def linter_rule_no_public_deps(self, libdep):
-        """
-        LIBDEP RULE:
-        Nodes explicitly marked as not allowed to have public dependencies should not
-        have public dependencies, unless the dependency is explicitly marked as allowed.
-        """
-        if not self._check_for_lint_tags('lint-no-public-deps', inclusive_tag=True):
-            return
-
-        if libdep.dependency_type not in (deptype.Global, deptype.Private):
-            # Check if the libdep exempts itself from this rule.
-            if self._check_for_lint_tags('lint-public-dep-allowed', libdep.target_node.env):
-                return
-
-            target_type = self.target[0].builder.get_name(self.env)
-            lib = os.path.basename(str(libdep))
-            self._raise_libdep_lint_exception(
-                textwrap.dedent(f"""\
-                    {target_type} '{self.target[0]}' has public dependency '{lib}'
-                    while being marked as not allowed to have public dependencies
-                    and '{lib}' does not exempt itself."""
-            ))
-
-    @linter_rule
-    def linter_rule_no_dups(self, libdep):
-        """
-        LIBDEP RULE:
-        A given node shall not link the same LIBDEP across public, private
-        or interface dependency types because it is ambiguous and unnecessary.
-        """
-        if self._check_for_lint_tags('lint-allow-dup-libdeps'):
-            return
-
-        if str(libdep) in self.unique_libs:
-            target_type = self.target[0].builder.get_name(self.env)
-            lib = os.path.basename(str(libdep))
-            self._raise_libdep_lint_exception(
-                f"{target_type} '{self.target[0]}' links '{lib}' multiple times."
-            )
-
-        self.unique_libs.add(str(libdep))
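The per-target escape hatch for these rules is the LIBDEPS_TAGS variable checked by _check_for_lint_tags. As an illustration only (the target and library names are hypothetical), an SConscript entry that opts a single node out of two of the checks in this class might look like:

    # Hypothetical SConscript usage: the tag names match the
    # _check_for_lint_tags() calls in the rules of this class.
    env.Library(
        target='example_lib',              # hypothetical name
        source=['example_lib.cpp'],
        LIBDEPS=['$BUILD_DIR/mongo/base'],
        LIBDEPS_TAGS=[
            'lint-allow-dup-libdeps',      # bypass linter_rule_no_dups
            'lint-allow-non-alphabetic',   # bypass linter_rule_alphabetic_deps
        ],
    )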
- """ - - if self._check_for_lint_tags('lint-allow-non-alphabetic'): - return - - # Start checking order after the first item in the list is recorded to compare with. - if libdep.dependency_type in self._libdeps_types_previous: - if self._libdeps_types_previous[libdep.dependency_type] > libdep.listed_name: - target_type = self.target[0].builder.get_name(self.env) - self._raise_libdep_lint_exception( - f"{target_type} '{self.target[0]}' has '{libdep.listed_name}' listed in {dep_type_to_env_var[libdep.dependency_type]} out of alphabetical order." - ) - - self._libdeps_types_previous[libdep.dependency_type] = libdep.listed_name - - @linter_rule - def linter_rule_programs_link_private(self, libdep): - """ - LIBDEP RULE: - All Programs shall only have public dependency's - because a Program will never be a dependency of another Program - or Library, and LIBDEPS transitiveness does not apply. Public - transitiveness has no meaning in this case and is used just as default. - """ - if self._check_for_lint_tags('lint-allow-program-links-private'): - return - - if (self.target[0].builder.get_name(self.env) == "Program" - and libdep.dependency_type not in (deptype.Global, deptype.Public)): - - lib = os.path.basename(str(libdep)) - self._raise_libdep_lint_exception( - textwrap.dedent(f"""\ - Program '{self.target[0]}' links non-public library '{lib}' - A 'Program' can only have {Constants.Libdeps} libs, - not {Constants.LibdepsPrivate} or {Constants.LibdepsInterface}.""" - )) - - @linter_rule - def linter_rule_no_bidirectional_deps(self, libdep): - """ - LIBDEP RULE: - And Library which issues reverse dependencies, shall not be directly - linked to by another node, to prevent forward and reverse linkages existing - at the same node. Instead the content of the library that needs to issue reverse - dependency needs to be separated from content that needs direct linkage into two - separate libraries, which can be linked correctly respectively. - """ - - if not libdep.target_node.env: - return - elif self._check_for_lint_tags('lint-allow-bidirectional-edges', libdep.target_node.env): - return - elif len(self._get_deps_dependents(libdep.target_node.env)) > 0: - - target_type = self.target[0].builder.get_name(self.env) - lib = os.path.basename(str(libdep)) - self._raise_libdep_lint_exception(textwrap.dedent(f"""\ - {target_type} '{self.target[0]}' links directly to a reverse dependency node '{lib}' - No node can link directly to a node that has {Constants.LibdepsDependents} or {Constants.ProgdepsDependents}.""" - )) - - @linter_rule - def linter_rule_nonprivate_on_deps_dependents(self, libdep): - """ - LIBDEP RULE: - A Library that issues reverse dependencies, shall not link libraries - with any kind of transitiveness, and will only link libraries privately. - This is because functionality that requires reverse dependencies should - not be transitive. - """ - if self._check_for_lint_tags('lint-allow-nonprivate-on-deps-dependents'): - return - - if (libdep.dependency_type != deptype.Private and libdep.dependency_type != deptype.Global - and len(self._get_deps_dependents()) > 0): - - target_type = self.target[0].builder.get_name(self.env) - lib = os.path.basename(str(libdep)) - self._raise_libdep_lint_exception(textwrap.dedent(f"""\ - {target_type} '{self.target[0]}' links non-private libdep '{lib}' and has a reverse dependency. 
-                A {target_type} can only have {Constants.LibdepsPrivate} dependencies if it has {Constants.LibdepsDependents} or {Constants.ProgdepsDependents}."""
-            ))
-
-    @linter_rule
-    def linter_rule_libdeps_must_be_list(self, libdep):
-        """
-        LIBDEP RULE:
-        LIBDEPS, LIBDEPS_PRIVATE, and LIBDEPS_INTERFACE must be set as lists in the
-        environment.
-        """
-        if self._check_for_lint_tags('lint-allow-nonlist-libdeps'):
-            return
-
-        libdeps_vars = list(dep_type_to_env_var.values()) + [
-            Constants.LibdepsDependents,
-            Constants.ProgdepsDependents]
-
-        for dep_type_val in libdeps_vars:
-
-            libdeps_list = self.env.get(dep_type_val, [])
-            if not SCons.Util.is_List(libdeps_list):
-
-                target_type = self.target[0].builder.get_name(self.env)
-                self._raise_libdep_lint_exception(textwrap.dedent(f"""\
-                    Found non-list type '{libdeps_list}' while evaluating {dep_type_val} for {target_type} '{self.target[0]}'
-                    {dep_type_val} must be set up as a list."""
-                ))
-
-dependency_visibility_ignored = {
-    deptype.Global: deptype.Public,
-    deptype.Interface: deptype.Public,
-    deptype.Public: deptype.Public,
-    deptype.Private: deptype.Public,
-}
-
-dependency_visibility_honored = {
-    deptype.Global: deptype.Private,
-    deptype.Interface: deptype.Interface,
-    deptype.Public: deptype.Public,
-    deptype.Private: deptype.Private,
-}
-
-dep_type_to_env_var = {
-    deptype.Global: Constants.LibdepsGlobal,
-    deptype.Interface: Constants.LibdepsInterface,
-    deptype.Public: Constants.Libdeps,
-    deptype.Private: Constants.LibdepsPrivate,
-}
-
-class DependencyCycleError(SCons.Errors.UserError):
-    """Exception representing a cycle discovered in library dependencies."""
-
-    def __init__(self, first_node):
-        super(DependencyCycleError, self).__init__()
-        self.cycle_nodes = [first_node]
-
-    def __str__(self):
-        return "Library dependency cycle detected: " + " => ".join(
-            str(n) for n in self.cycle_nodes
-        )
-
-class LibdepLinterError(SCons.Errors.UserError):
-    """Exception representing discongruent usage of libdeps"""
-
-class MissingSyslibdepError(SCons.Errors.UserError):
-    """Exception representing a missing syslibdep"""
-
-def _get_sorted_direct_libdeps(node):
-    direct_sorted = getattr(node.attributes, "libdeps_direct_sorted", None)
-    if direct_sorted is None:
-        direct = getattr(node.attributes, "libdeps_direct", [])
-        direct_sorted = sorted(direct, key=lambda t: str(t.target_node))
-        setattr(node.attributes, "libdeps_direct_sorted", direct_sorted)
-    return direct_sorted
-
-
-class LibdepsVisitationMark(enum.IntEnum):
-    UNMARKED = 0
-    MARKED_PRIVATE = 1
-    MARKED_PUBLIC = 2
-
-
-def _libdeps_visit_private(n, marked, walking, debug=False):
-    if marked[n.target_node] >= LibdepsVisitationMark.MARKED_PRIVATE:
-        return
-
-    if n.target_node in walking:
-        raise DependencyCycleError(n.target_node)
-
-    walking.add(n.target_node)
-
-    try:
-        for child in _get_sorted_direct_libdeps(n.target_node):
-            _libdeps_visit_private(child, marked, walking)
-
-        marked[n.target_node] = LibdepsVisitationMark.MARKED_PRIVATE
-
-    except DependencyCycleError as e:
-        if len(e.cycle_nodes) == 1 or e.cycle_nodes[0] != e.cycle_nodes[-1]:
-            e.cycle_nodes.insert(0, n.target_node)
-        raise
-
-    finally:
-        walking.remove(n.target_node)
-
-
-def _libdeps_visit(n, tsorted, marked, walking, debug=False):
-    # The marked dictionary tracks which sorts of visitation a node
-    # has received. Values for a given node can be UNMARKED/absent,
-    # MARKED_PRIVATE, or MARKED_PUBLIC. These are to be interpreted as
-    # follows:
-    #
-    # 0/UNMARKED: Node is not marked.
-    #
-    # MARKED_PRIVATE: Node has only been explored as part of looking
-    # for cycles under a LIBDEPS_PRIVATE edge.
-    #
-    # MARKED_PUBLIC: Node has been explored and any of its transitive
-    # dependencies have been incorporated into `tsorted`.
-    #
-    # The _libdeps_visit_private function above will only mark things
-    # with MARKED_PRIVATE, while _libdeps_visit will mark things
-    # MARKED_PUBLIC.
-    if marked[n.target_node] == LibdepsVisitationMark.MARKED_PUBLIC:
-        return
-
-    # The walking set is used for cycle detection. We record all our
-    # predecessors in our depth-first search, and if we observe one of
-    # our predecessors as a child, we know we have a cycle.
-    if n.target_node in walking:
-        raise DependencyCycleError(n.target_node)
-
-    walking.add(n.target_node)
-
-    if debug:
-        print(f"     * {n.dependency_type} => {n.listed_name}")
-
-    try:
-        children = _get_sorted_direct_libdeps(n.target_node)
-
-        # We first walk all of our public dependencies so that we can
-        # put full marks on anything that is in our public transitive
-        # graph. We then do a second walk into any private nodes to
-        # look for cycles. While we could do just one walk over the
-        # children, it is slightly faster to do two passes, since if
-        # the algorithm walks into a private edge early, it would do a
-        # lot of non-productive (except for cycle checking) walking
-        # and marking, but if another public path gets into that same
-        # subtree, then it must walk and mark it again to raise it to
-        # the public mark level. Whereas, if the algorithm first walks
-        # the whole public tree, then those are all productive marks
-        # and add to tsorted, and then the private walk will only need
-        # to examine those things that are only reachable via private
-        # edges.
-
-        for child in children:
-            if child.dependency_type != deptype.Private:
-                _libdeps_visit(child, tsorted, marked, walking, debug)
-
-        for child in children:
-            if child.dependency_type == deptype.Private:
-                _libdeps_visit_private(child, marked, walking, debug)
-
-        marked[n.target_node] = LibdepsVisitationMark.MARKED_PUBLIC
-        tsorted.append(n.target_node)
-
-    except DependencyCycleError as e:
-        if len(e.cycle_nodes) == 1 or e.cycle_nodes[0] != e.cycle_nodes[-1]:
-            e.cycle_nodes.insert(0, n.target_node)
-        raise
-
-    finally:
-        walking.remove(n.target_node)
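As a standalone sketch of the marking scheme used by the two visitation functions above (not the build code itself), the essential idea is a DFS that raises a node's mark to the level of the strongest path reaching it, and only emits into the topological order at the public level:

    # Toy model: edges are (child, is_private) pairs; mark levels mirror
    # MARKED_PRIVATE=1 and MARKED_PUBLIC=2. Privately reachable nodes are
    # cycle-checked but never emitted into the ordering.
    from collections import defaultdict

    def visit(node, graph, marked, walking, tsorted, want=2):
        if marked[node] >= want:
            return
        if node in walking:
            raise RuntimeError(f"dependency cycle at {node}")
        walking.add(node)
        try:
            for child, is_private in graph[node]:
                visit(child, graph, marked, walking, tsorted,
                      want=1 if is_private else want)
            marked[node] = want
            if want == 2:
                tsorted.append(node)
        finally:
            walking.remove(node)

    graph = {'a': [('b', False), ('c', True)], 'b': [('c', False)], 'c': []}
    order, marked = [], defaultdict(int)
    visit('a', graph, marked, set(), order)
    print(order)  # ['c', 'b', 'a']: 'c' appears because 'b' links it publicly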
- """ - - cache = getattr(node.attributes, Constants.LibdepsCached, None) - if cache is not None: - if debug: - print(" Cache:") - for dep in cache: - print(f" * {str(dep)}") - return cache - - if debug: - print(f" Edges:") - - tsorted = [] - - marked = defaultdict(lambda: LibdepsVisitationMark.UNMARKED) - walking = set() - - for child in _get_sorted_direct_libdeps(node): - if child.dependency_type != deptype.Interface: - _libdeps_visit(child, tsorted, marked, walking, debug=debug) - tsorted.reverse() - - setattr(node.attributes, Constants.LibdepsCached, tsorted) - return tsorted - - -def _missing_syslib(name): - return Constants.MissingLibdep + name - - -def update_scanner(env, builder_name=None, debug=False): - """Update the scanner for "builder" to also scan library dependencies.""" - - builder = env["BUILDERS"][builder_name] - old_scanner = builder.target_scanner - - if old_scanner: - path_function = old_scanner.path_function - else: - path_function = None - - def new_scanner(node, env, path=()): - if debug: - print(f"LIBDEPS SCANNER: {str(node)}") - print(f" Declared dependencies:") - print(f" global: {env.get(Constants.LibdepsGlobal, None)}") - print(f" private: {env.get(Constants.LibdepsPrivate, None)}") - print(f" public: {env.get(Constants.Libdeps, None)}") - print(f" interface: {env.get(Constants.LibdepsInterface, None)}") - print(f" no_inherit: {env.get(Constants.LibdepsNoInherit, None)}") - - if old_scanner: - result = old_scanner.function(node, env, path) - else: - result = [] - result.extend(_get_libdeps(node, debug=debug)) - if debug: - print(f" Build dependencies:") - print('\n'.join([' * ' + str(t) for t in result])) - print('\n') - return result - - builder.target_scanner = SCons.Scanner.Scanner( - function=new_scanner, path_function=path_function - ) - - -def get_libdeps(source, target, env, for_signature, debug=False): - """Implementation of the special _LIBDEPS environment variable. - - Expands to the library dependencies for a target. - """ - - target = env.Flatten([target]) - return _get_libdeps(target[0], debug=debug) - - -def get_libdeps_objs(source, target, env, for_signature, debug=False): - objs = [] - for lib in get_libdeps(source, target, env, for_signature, debug=debug): - # This relies on Node.sources being order stable build-to-build. - objs.extend(lib.sources) - return objs - - -def stringify_deps(env, deps): - lib_link_prefix = env.subst("$LIBLINKPREFIX") - lib_link_suffix = env.subst("$LIBLINKSUFFIX") - - # Elements of libdeps are either strings (str or unicode), or they're File objects. - # If they're File objects, they can be passed straight through. If they're strings, - # they're believed to represent library short names, that should be prefixed with -l - # or the compiler-specific equivalent. I.e., 'm' becomes '-lm', but 'File("m.a") is passed - # through whole cloth. - return [f"{lib_link_prefix}{d}{lib_link_suffix}" if isinstance(d, str) else d for d in deps] - - -def get_syslibdeps(source, target, env, for_signature, debug=False, shared=True): - """ Given a SCons Node, return its system library dependencies. - - These are the dependencies listed with SYSLIBDEPS, and are linked using -l. 
- """ - - deps = getattr(target[0].attributes, Constants.SysLibdepsCached, None) - if deps is None: - - # Get the syslibdeps for the current node - deps = target[0].get_env().Flatten(copy.copy(target[0].get_env().get(Constants.SysLibdepsPrivate)) or []) - deps += target[0].get_env().Flatten(target[0].get_env().get(Constants.SysLibdeps) or []) - - for lib in _get_libdeps(target[0]): - - # For each libdep get its syslibdeps, and then check to see if we can - # add it to the deps list. For static build we will also include private - # syslibdeps to be transitive. For a dynamic build we will only make - # public libdeps transitive. - syslibs = [] - if not shared: - syslibs += lib.get_env().get(Constants.SysLibdepsPrivate) or [] - syslibs += lib.get_env().get(Constants.SysLibdeps) or [] - - # Validate the libdeps, a configure check has already checked what - # syslibdeps are available so we can hard fail here if a syslibdep - # is being attempted to be linked with. - for syslib in syslibs: - if not syslib: - continue - - if isinstance(syslib, str) and syslib.startswith(Constants.MissingLibdep): - raise MissingSyslibdepError(textwrap.dedent(f"""\ - LibdepsError: - Target '{str(target[0])}' depends on the availability of a - system provided library for '{syslib[len(Constants.MissingLibdep):]}', - but no suitable library was found during configuration.""" - )) - - deps.append(syslib) - - setattr(target[0].attributes, Constants.SysLibdepsCached, deps) - return stringify_deps(env, deps) - - -def _append_direct_libdeps(node, prereq_nodes): - # We do not bother to decorate nodes that are not actual Objects - if type(node) == str: - return - if getattr(node.attributes, "libdeps_direct", None) is None: - node.attributes.libdeps_direct = [] - node.attributes.libdeps_direct.extend(prereq_nodes) - - -def _get_flagged_libdeps(source, target, env, for_signature): - for lib in get_libdeps(source, target, env, for_signature): - # Make sure lib is a Node so we can get the env to check for flags. - libnode = lib - if not isinstance(lib, (str, SCons.Node.FS.File, SCons.Node.FS.Entry)): - libnode = env.File(lib) - - # Create a libdep and parse the prefix and postfix (and separators if any) - # flags from the environment. - cur_lib = FlaggedLibdep(libnode, env) - yield cur_lib - - -def _get_node_with_ixes(env, node, node_builder_type): - """ - Gets the node passed in node with the correct ixes applied - for the given builder type. - """ - - if not node: - return node - - node_builder = env["BUILDERS"][node_builder_type] - node_factory = node_builder.target_factory or env.File - - # Cache the 'ixes' in a function scope global so we don't need - # to run SCons performance intensive 'subst' each time - cache_key = (id(env), node_builder_type) - try: - prefix, suffix = _get_node_with_ixes.node_type_ixes[cache_key] - except KeyError: - prefix = node_builder.get_prefix(env) - suffix = node_builder.get_suffix(env) - - # TODO(SERVER-50681): Find a way to do this that doesn't hard - # code these extensions. See the code review for SERVER-27507 - # for additional discussion. 
- if suffix == ".dll": - suffix = ".lib" - - _get_node_with_ixes.node_type_ixes[cache_key] = (prefix, suffix) - - node_with_ixes = SCons.Util.adjustixes(node, prefix, suffix) - return node_factory(node_with_ixes) - -_get_node_with_ixes.node_type_ixes = dict() - -def add_node_from(env, node): - - env.GetLibdepsGraph().add_nodes_from([( - str(node.abspath), - { - NodeProps.bin_type.name: node.builder.get_name(env), - })]) - -def add_edge_from(env, from_node, to_node, visibility, direct): - - env.GetLibdepsGraph().add_edges_from([( - from_node, - to_node, - { - EdgeProps.direct.name: direct, - EdgeProps.visibility.name: int(visibility) - })]) - -def add_libdeps_node(env, target, libdeps): - - if str(target).endswith(env["SHLIBSUFFIX"]): - node = _get_node_with_ixes(env, str(target.abspath), target.get_builder().get_name(env)) - add_node_from(env, node) - - for libdep in libdeps: - if str(libdep.target_node).endswith(env["SHLIBSUFFIX"]): - add_edge_from( - env, - str(node.abspath), - str(libdep.target_node.abspath), - visibility=libdep.dependency_type, - direct=True) - - -def get_libdeps_nodes(env, target, builder, debug=False, visibility_map=None): - if visibility_map is None: - visibility_map = dependency_visibility_ignored - - if not SCons.Util.is_List(target): - target = [target] - - # Get the current list of nodes not to inherit on each target - no_inherit = set(env.get(Constants.LibdepsNoInherit, [])) - - # Get all the libdeps from the env so we can - # can append them to the current target_node. - libdeps = [] - for dep_type in sorted(visibility_map.keys()): - - if dep_type == deptype.Global: - if any("conftest" in str(t) for t in target): - # Ignore global dependencies for conftests - continue - - # Libraries may not be stored as a list in the env, - # so we must convert single library strings to a list. - libs = env.get(dep_type_to_env_var[dep_type], []).copy() - if not SCons.Util.is_List(libs): - libs = [libs] - - for lib in libs: - if not lib: - continue - - lib_with_ixes = _get_node_with_ixes(env, lib, builder) - - if lib in no_inherit: - if debug and not any("conftest" in str(t) for t in target): - print(f" {dep_type[1]} =/> {lib}") - - else: - if debug and not any("conftest" in str(t) for t in target): - print(f" {dep_type[1]} => {lib}") - - libdeps.append(dependency(lib_with_ixes, dep_type, lib)) - - return libdeps - - -def libdeps_emitter(target, source, env, debug=False, builder=None, visibility_map=None, ignore_progdeps=False): - """SCons emitter that takes values from the LIBDEPS environment variable and - converts them to File node objects, binding correct path information into - those File objects. - - Emitters run on a particular "target" node during the initial execution of - the SConscript file, rather than during the later build phase. When they - run, the "env" environment's working directory information is what you - expect it to be -- that is, the working directory is considered to be the - one that contains the SConscript file. This allows specification of - relative paths to LIBDEPS elements. - - This emitter also adds LIBSUFFIX and LIBPREFIX appropriately. - - NOTE: For purposes of LIBDEPS_DEPENDENTS propagation, only the first member - of the "target" list is made a prerequisite of the elements of LIBDEPS_DEPENDENTS. 
- """ - - if visibility_map is None: - visibility_map = dependency_visibility_ignored - - if debug and not any("conftest" in str(t) for t in target): - print(f"LIBDEPS EMITTER: {str(target[0])}") - print(f" Declared dependencies:") - print(f" global: {env.get(Constants.LibdepsGlobal, None)}") - print(f" private: {env.get(Constants.LibdepsPrivate, None)}") - print(f" public: {env.get(Constants.Libdeps, None)}") - print(f" interface: {env.get(Constants.LibdepsInterface, None)}") - print(f" no_inherit: {env.get(Constants.LibdepsNoInherit, None)}") - print(f" Edges:") - - libdeps = get_libdeps_nodes(env, target, builder, debug, visibility_map) - - if debug and not any("conftest" in str(t) for t in target): - print(f"\n") - - # Lint the libdeps to make sure they are following the rules. - # This will skip some or all of the checks depending on the options - # and LIBDEPS_TAGS used. - if not any("conftest" in str(t) for t in target): - LibdepLinter(env, target).lint_libdeps(libdeps) - - if env.get('SYMBOLDEPSSUFFIX', None): - for t in target: - add_libdeps_node(env, t, libdeps) - - # We ignored the visibility_map until now because we needed to use - # original dependency value for linting. Now go back through and - # use the map to convert to the desired dependencies, for example - # all Public in the static linking case. - for libdep in libdeps: - libdep.dependency_type = visibility_map[libdep.dependency_type] - - for t in target: - # target[0] must be a Node and not a string, or else libdeps will fail to - # work properly. - _append_direct_libdeps(t, libdeps) - - for dependent in env.get(Constants.LibdepsDependents, []): - if dependent is None: - continue - - visibility = deptype.Private - if isinstance(dependent, tuple): - visibility = dependent[1] - dependent = dependent[0] - - dependentNode = _get_node_with_ixes( - env, dependent, builder - ) - _append_direct_libdeps( - dependentNode, [dependency(target[0], visibility_map[visibility], dependent)] - ) - - if not ignore_progdeps: - for dependent in env.get(Constants.ProgdepsDependents, []): - if dependent is None: - continue - - visibility = deptype.Public - if isinstance(dependent, tuple): - # TODO: Error here? Non-public PROGDEPS_DEPENDENTS probably are meaningless - visibility = dependent[1] - dependent = dependent[0] - - dependentNode = _get_node_with_ixes( - env, dependent, "Program" - ) - _append_direct_libdeps( - dependentNode, [dependency(target[0], visibility_map[visibility], dependent)] - ) - - return target, source - - -def expand_libdeps_tags(source, target, env, for_signature): - results = [] - for expansion in env.get(Constants.LibdepsTagExpansion, []): - results.append(expansion(source, target, env, for_signature)) - return results - - -def expand_libdeps_with_flags(source, target, env, for_signature): - - libdeps_with_flags = [] - - # Used to make modifications to the previous libdep on the link line - # if needed. An empty class here will make the switch_flag conditionals - # below a bit cleaner. - prev_libdep = None - - for flagged_libdep in _get_flagged_libdeps(source, target, env, for_signature): - - # If there are no flags to process we can move on to the next lib. - # start_index wont mater in the case because if there are no flags - # on the previous lib, then we will never need to do the chopping - # mechanism on the next iteration. 
-
-
-def expand_libdeps_with_flags(source, target, env, for_signature):
-
-    libdeps_with_flags = []
-
-    # Used to make modifications to the previous libdep on the link line
-    # if needed. An empty class here will make the switch_flag conditionals
-    # below a bit cleaner.
-    prev_libdep = None
-
-    for flagged_libdep in _get_flagged_libdeps(source, target, env, for_signature):
-
-        # If there are no flags to process we can move on to the next lib.
-        # start_index won't matter in this case, because if there are no flags
-        # on the previous lib, then we will never need to do the chopping
-        # mechanism on the next iteration.
-        if not flagged_libdep.prefix_flags and not flagged_libdep.postfix_flags:
-            libdeps_with_flags.append(str(flagged_libdep))
-            prev_libdep = flagged_libdep
-            continue
-
-        # This for loop will go through the previous results and remove the 'off'
-        # flag as well as removing the new 'on' flag. For example, let libA and libB
-        # both use on and off flags which would normally generate on the link line as:
-        #     -Wl--on-flag libA.a -Wl--off-flag -Wl--on-flag libB.a -Wl--off-flag
-        # This loop below will spot the cases where the flag was turned off and then
-        # immediately turned back on
-        for switch_flag in getattr(flagged_libdep.libnode.attributes, 'libdeps_switch_flags', []):
-            if (prev_libdep and switch_flag['on'] in flagged_libdep.prefix_flags
-                and switch_flag['off'] in prev_libdep.postfix_flags):
-
-                flagged_libdep.prefix_flags.remove(switch_flag['on'])
-                prev_libdep.postfix_flags.remove(switch_flag['off'])
-
-                # prev_lib has had its list modified, and it has a start index
-                # from the last iteration, so it will chop off the end of the current
-                # list and reapply the end with the new flags.
-                prev_libdep.add_lib_to_result_list(libdeps_with_flags)
-
-        # Store the length of the current list before adding
-        # the next set of flags, as that will be the start index for the previous
-        # lib next time around in case there are any switch flags to chop off.
-        start_index = len(libdeps_with_flags)
-        flagged_libdep.add_lib_to_result_list(libdeps_with_flags)
-
-        # Done processing the current lib, so set it to previous for the next iteration.
-        prev_libdep = flagged_libdep
-        prev_libdep.start_index = start_index
-
-    return libdeps_with_flags
-
-def generate_libdeps_graph(env):
-    if env.get('SYMBOLDEPSSUFFIX', None):
-
-        find_symbols = env.Dir("$BUILD_DIR").path + "/libdeps/find_symbols"
-        libdeps_graph = env.GetLibdepsGraph()
-
-        symbol_deps = []
-        for symbols_file, target_node in env.get('LIBDEPS_SYMBOL_DEP_FILES', []):
-
-            direct_libdeps = []
-            for direct_libdep in _get_sorted_direct_libdeps(target_node):
-                add_node_from(env, direct_libdep.target_node)
-                add_edge_from(
-                    env,
-                    str(target_node.abspath),
-                    str(direct_libdep.target_node.abspath),
-                    visibility=int(direct_libdep.dependency_type),
-                    direct=True)
-                direct_libdeps.append(direct_libdep.target_node.abspath)
-
-            for libdep in _get_libdeps(target_node):
-                if libdep.abspath not in direct_libdeps:
-                    add_node_from(env, libdep)
-                    add_edge_from(
-                        env,
-                        str(target_node.abspath),
-                        str(libdep.abspath),
-                        visibility=int(deptype.Public),
-                        direct=False)
-            if env['PLATFORM'] == 'darwin':
-                sep = ' '
-            else:
-                sep = ':'
-            ld_path = sep.join([os.path.dirname(str(libdep)) for libdep in _get_libdeps(target_node)])
-            symbol_deps.append(env.Command(
-                target=symbols_file,
-                source=target_node,
-                action=SCons.Action.Action(
-                    f'{find_symbols} $SOURCE "{ld_path}" $TARGET',
-                    "Generating $SOURCE symbol dependencies" if not env['VERBOSE'] else "")))
-
-        def write_graph_hash(env, target, source):
-
-            with open(target[0].path, 'w') as f:
-                json_str = json.dumps(networkx.readwrite.json_graph.node_link_data(env.GetLibdepsGraph()), sort_keys=True).encode('utf-8')
-                f.write(hashlib.sha256(json_str).hexdigest())
-
-        graph_hash = env.Command(target="$BUILD_DIR/libdeps/graph_hash.sha256",
-                                 source=symbol_deps,
-                                 action=SCons.Action.FunctionAction(
-                                     write_graph_hash,
-                                     {"cmdstr": None}))
-        env.Depends(graph_hash, [
-            env.File("#SConstruct")] +
-            glob.glob("**/SConscript", recursive=True) +
-            [os.path.abspath(__file__),
-            env.File('$BUILD_DIR/mongo/util/version_constants.h')])
-
-        graph_node = env.Command(
-            target=env.get('LIBDEPS_GRAPH_FILE', None),
-            source=symbol_deps,
-            action=SCons.Action.FunctionAction(
-                generate_graph,
-                {"cmdstr": "Generating libdeps graph"}))
-
-        env.Depends(graph_node, [graph_hash] + env.Glob("#buildscripts/libdeps/libdeps/*"))
-
-def generate_graph(env, target, source):
-
-    libdeps_graph = env.GetLibdepsGraph()
-
-    for symbol_deps_file in source:
-        with open(str(symbol_deps_file)) as f:
-            symbols = {}
-            try:
-                for symbol, lib in json.load(f).items():
-                    # ignore symbols from external libraries,
-                    # they will just clutter the graph
-                    if lib.startswith(env.Dir("$BUILD_DIR").path):
-                        if lib not in symbols:
-                            symbols[lib] = []
-                        symbols[lib].append(symbol)
-            except json.JSONDecodeError:
-                env.FatalError(f"Failed processing json file: {str(symbol_deps_file)}")
-
-            for libdep in symbols:
-                from_node = os.path.abspath(str(symbol_deps_file)[:-len(env['SYMBOLDEPSSUFFIX'])])
-                to_node = os.path.abspath(libdep).strip()
-                libdeps_graph.add_edges_from([(
-                    from_node,
-                    to_node,
-                    {EdgeProps.symbols.name: " ".join(symbols[libdep]) })])
-        node = env.File(str(symbol_deps_file)[:-len(env['SYMBOLDEPSSUFFIX'])])
-        add_node_from(env, node)
-
-    libdeps_graph_file = f"{env.Dir('$BUILD_DIR').path}/libdeps/libdeps.graphml"
-    networkx.write_graphml(libdeps_graph, libdeps_graph_file, named_key_ids=True)
-    with fileinput.FileInput(libdeps_graph_file, inplace=True) as file:
-        for line in file:
-            print(line.replace(str(env.Dir("$BUILD_DIR").abspath + os.sep), ''), end='')
-
-
-def setup_environment(env, emitting_shared=False, debug='off', linting='on'):
-    """Set up the given build environment to do LIBDEPS tracking."""
-
-    LibdepLinter.skip_linting = linting == 'off'
-    LibdepLinter.print_linter_errors = linting == 'print'
-
-    try:
-        env["_LIBDEPS"]
-    except KeyError:
-        env["_LIBDEPS"] = "$_LIBDEPS_LIBS"
-
-    env["_LIBDEPS_TAGS"] = expand_libdeps_tags
-    env["_LIBDEPS_GET_LIBS"] = partial(get_libdeps, debug=debug)
-    env["_LIBDEPS_OBJS"] = partial(get_libdeps_objs, debug=debug)
-    env["_SYSLIBDEPS"] = partial(get_syslibdeps, debug=debug, shared=emitting_shared)
-
-    env[Constants.Libdeps] = SCons.Util.CLVar()
-    env[Constants.SysLibdeps] = SCons.Util.CLVar()
-
-    # Create the alias for graph generation; the existence of this alias
-    # on the command line will cause the libdeps-graph generation to be
-    # configured.
-    env['LIBDEPS_GRAPH_ALIAS'] = env.Alias(
-        'generate-libdeps-graph',
-        "${BUILD_DIR}/libdeps/libdeps.graphml")[0]
-
-    if str(env['LIBDEPS_GRAPH_ALIAS']) in COMMAND_LINE_TARGETS:
-
-        # Detect if the current system has the tools to perform the generation.
-        if env.GetOption('ninja') != 'disabled':
-            env.FatalError("Libdeps graph generation is not supported with ninja builds.")
-        if not emitting_shared:
-            env.FatalError("Libdeps graph generation currently only supports dynamic builds.")
-
-        if env['PLATFORM'] == 'darwin':
-            required_bins = ['awk', 'sed', 'otool', 'nm']
-        else:
-            required_bins = ['awk', 'grep', 'ldd', 'nm']
-        for bin in required_bins:
-            if not env.WhereIs(bin):
-                env.FatalError(f"'{bin}' not found, Libdeps graph generation requires {bin}.")
-
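Stripped of the build machinery, the artifact produced by generate_graph above is an ordinary networkx graph serialized as GraphML. A minimal sketch of that shape, with illustrative paths and a hard-coded visibility value (the real code goes through EdgeProps, NodeProps, and deptype):

    import networkx

    g = networkx.DiGraph()
    g.add_node('/abs/build/libfoo.so', bin_type='SharedLibrary')
    g.add_node('/abs/build/libbar.so', bin_type='SharedLibrary')
    g.add_edge('/abs/build/libfoo.so', '/abs/build/libbar.so',
               direct=True, visibility=1)  # 1 ~ deptype.Public
    networkx.write_graphml(g, 'libdeps.graphml', named_key_ids=True)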
-
-        # The find_symbols binary is a small fast C binary which will extract the missing
-        # symbols from the target library, and discover what linked libraries supply it. This
-        # sets up the binary to be built.
-        find_symbols_env = env.Clone()
-        find_symbols_env.VariantDir('${BUILD_DIR}/libdeps', 'buildscripts/libdeps', duplicate = 0)
-        find_symbols_node = find_symbols_env.Program(
-            target='${BUILD_DIR}/libdeps/find_symbols',
-            source=['${BUILD_DIR}/libdeps/find_symbols.c'],
-            CFLAGS=['-O3'])
-
-        # Here we are setting up some functions which will return a single instance of the
-        # network graph and symbol deps list. We also set up some environment variables
-        # which are used alongside the functions.
-        symbol_deps = []
-        def append_symbol_deps(env, symbol_deps_file):
-            env.Depends(env['LIBDEPS_GRAPH_FILE'], symbol_deps_file[0])
-            symbol_deps.append(symbol_deps_file)
-        env.AddMethod(append_symbol_deps, "AppendSymbolDeps")
-
-        env['LIBDEPS_SYMBOL_DEP_FILES'] = symbol_deps
-        env['LIBDEPS_GRAPH_FILE'] = env.File("${BUILD_DIR}/libdeps/libdeps.graphml")
-        env['LIBDEPS_GRAPH_SCHEMA_VERSION'] = 3
-        env["SYMBOLDEPSSUFFIX"] = '.symbol_deps'
-
-        libdeps_graph = LibdepsGraph()
-        libdeps_graph.graph['invocation'] = " ".join([env['ESCAPE'](str(sys.executable))] + [env['ESCAPE'](arg) for arg in sys.argv])
-        libdeps_graph.graph['git_hash'] = env['MONGO_GIT_HASH']
-        libdeps_graph.graph['graph_schema_version'] = env['LIBDEPS_GRAPH_SCHEMA_VERSION']
-        libdeps_graph.graph['build_dir'] = env.Dir('$BUILD_DIR').path
-        libdeps_graph.graph['deptypes'] = json.dumps({key: value[0] for key, value in deptype.__members__.items() if isinstance(value, tuple)})
-
-        def get_libdeps_graph(env):
-            return libdeps_graph
-        env.AddMethod(get_libdeps_graph, "GetLibdepsGraph")
-
-        # Now we will set up an emitter, and an additional action, for several
-        # of the builders involved with dynamic builds.
-        def libdeps_graph_emitter(target, source, env):
-            if "conftest" not in str(target[0]):
-                symbol_deps_file = env.File(str(target[0]) + env['SYMBOLDEPSSUFFIX'])
-                env.Depends(symbol_deps_file, '${BUILD_DIR}/libdeps/find_symbols')
-                env.AppendSymbolDeps((symbol_deps_file,target[0]))
-
-            return target, source
-
-        for builder_name in ("Program", "SharedLibrary", "LoadableModule"):
-            builder = env['BUILDERS'][builder_name]
-            base_emitter = builder.emitter
-            new_emitter = SCons.Builder.ListEmitter([base_emitter, libdeps_graph_emitter])
-            builder.emitter = new_emitter
-
-
-    env.Append(
-        LIBDEPS_LIBEMITTER=partial(
-            libdeps_emitter,
-            debug=debug,
-            builder="StaticLibrary"),
-        LIBEMITTER=lambda target, source, env: env["LIBDEPS_LIBEMITTER"](target, source, env),
-        LIBDEPS_SHAREMITTER=partial(
-            libdeps_emitter,
-            debug=debug,
-            builder="SharedArchive", ignore_progdeps=True),
-        SHAREMITTER=lambda target, source, env: env["LIBDEPS_SHAREMITTER"](target, source, env),
-        LIBDEPS_SHLIBEMITTER=partial(
-            libdeps_emitter,
-            debug=debug,
-            builder="SharedLibrary",
-            visibility_map=dependency_visibility_honored
-        ),
-        SHLIBEMITTER=lambda target, source, env: env["LIBDEPS_SHLIBEMITTER"](target, source, env),
-        LIBDEPS_PROGEMITTER=partial(
-            libdeps_emitter,
-            debug=debug,
-            builder="SharedLibrary" if emitting_shared else "StaticLibrary"
-        ),
-        PROGEMITTER=lambda target, source, env: env["LIBDEPS_PROGEMITTER"](target, source, env),
-    )
-
-    env["_LIBDEPS_LIBS_WITH_TAGS"] = expand_libdeps_with_flags
-
-    env["_LIBDEPS_LIBS"] = (
-        "$LINK_LIBGROUP_START "
-        "$_LIBDEPS_LIBS_WITH_TAGS "
-        "$LINK_LIBGROUP_END "
-    )
-
-    env.Prepend(_LIBFLAGS="$_LIBDEPS_TAGS $_LIBDEPS $_SYSLIBDEPS ")
-    for builder_name in ("Program", "SharedLibrary", "LoadableModule", "SharedArchive"):
-        try:
-            update_scanner(env, builder_name, debug=debug)
-        except KeyError:
-            pass
-
-
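The _LIBDEPS_LIBS expansion configured above brackets the libraries in a link group, so that ordering between mutually referencing static archives does not matter. On a GNU-ld-style toolchain the group variables would plausibly be set along these lines (illustrative values, not necessarily the project's actual flag setup):

    # With these set, "$_LIBDEPS_LIBS" expands to
    #   -Wl,--start-group <libs and their flags> -Wl,--end-group
    env['LINK_LIBGROUP_START'] = '-Wl,--start-group'
    env['LINK_LIBGROUP_END'] = '-Wl,--end-group'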
-def setup_conftests(conf):
-    def FindSysLibDep(context, name, libs, **kwargs):
-        var = "LIBDEPS_" + name.upper() + "_SYSLIBDEP"
-        kwargs["autoadd"] = False
-        for lib in libs:
-            result = context.sconf.CheckLib(lib, **kwargs)
-            context.did_show_result = 1
-            if result:
-                context.env[var] = lib
-                return context.Result(result)
-        context.env[var] = _missing_syslib(name)
-        return context.Result(result)
-
-    conf.AddTest("FindSysLibDep", FindSysLibDep)
diff --git a/site_scons/site_tools/icecream.py b/site_scons/site_tools/icecream.py
index 7456ed0cc8f..4dadefe32ba 100644
--- a/site_scons/site_tools/icecream.py
+++ b/site_scons/site_tools/icecream.py
@@ -493,6 +493,12 @@ def generate(env):
     # seems fragile. If you find your local machine being overrun by
     # jobs, figure out what sort they are and extend this part of the
     # setup.
+    def icerun_generator(target, source, env, for_signature):
+        if "conftest" not in str(target[0]):
+            return '$ICERUN'
+        return ''
+    env['ICERUN_GENERATOR'] = icerun_generator
+
     icerun_commands = [
         "ARCOM",
         "LINKCOM",
@@ -502,7 +508,7 @@ def generate(env):
 
     for command in icerun_commands:
         if command in env:
-            env[command] = " ".join(["$( $ICERUN $)", env[command]])
+            env[command] = " ".join(["$( $ICERUN_GENERATOR $)", env[command]])
 
     # Uncomment these to debug your icecc integration
     if env['ICECREAM_DEBUG']:
diff --git a/site_scons/site_tools/mongo_test_execution.py b/site_scons/site_tools/mongo_test_execution.py
index c60d5c6e164..2433e446c8c 100644
--- a/site_scons/site_tools/mongo_test_execution.py
+++ b/site_scons/site_tools/mongo_test_execution.py
@@ -22,22 +22,51 @@
 
 import os
 
+import SCons
 
-def generate_test_execution_aliases(env, test):
+import auto_install_binaries
+
+_proof_scanner_cache_key = "proof_scanner_cache"
+_associated_proof = "associated_proof_key"
+
+def proof_generator_command_scanner_func(node, env, path):
+    results = getattr(node.attributes, _proof_scanner_cache_key, None)
+    if results is not None:
+        return results
+    results = env.GetTransitivelyInstalledFiles(node)
+    setattr(node.attributes, _proof_scanner_cache_key, results)
+    return results
+
+proof_generator_command_scanner = SCons.Scanner.Scanner(
+    function=proof_generator_command_scanner_func,
+    path_function=None,
+    recursive=True
+)
+
+def auto_prove_task(env, component, role):
+    entry = auto_install_binaries.get_alias_map_entry(env, component, role)
+    return [
+        getattr(f.attributes, _associated_proof)
+        for f in entry.files
+        if hasattr(f.attributes, _associated_proof)
+    ]
+def generate_test_execution_aliases(env, test):
     installed = [test]
     if env.get("AUTO_INSTALL_ENABLED", False) and env.GetAutoInstalledFiles(test):
         installed = env.GetAutoInstalledFiles(test)
 
-    target_name = os.path.basename(installed[0].get_path())
-    command = env.Command(
-        target="#+{}".format(target_name),
-        source=installed,
-        action="${SOURCES[0]} $UNITTEST_FLAGS",
+    target_name = os.path.basename(installed[0].path)
+
+    target_command = env.Command(
+        target=f"#+{target_name}",
+        source=installed[0],
+        action="$( $ICERUN $) ${SOURCES[0]} $UNITTEST_FLAGS",
         NINJA_POOL="console",
     )
+    env.Pseudo(target_command)
+    env.Alias("test-execution-aliases", target_command)
 
-    env.Alias("test-execution-aliases", command)
     for source in test.sources:
         source_base_name = os.path.basename(source.get_path())
         # Strip suffix
@@ -51,15 +80,53 @@ def generate_test_execution_aliases(env, test):
             continue
 
         source_command = env.Command(
-            target="#+{}".format(source_name),
-            source=installed,
-            action="${SOURCES[0]} -fileNameFilter $TEST_SOURCE_FILE_NAME $UNITTEST_FLAGS",
$UNITTEST_FLAGS", + target=f"#+{source_name}", + source=installed[0], + action="$( $ICERUN $) ${SOURCES[0]} -fileNameFilter $TEST_SOURCE_FILE_NAME $UNITTEST_FLAGS", TEST_SOURCE_FILE_NAME=source_name, NINJA_POOL="console", ) + env.Pseudo(source_command) + env.Alias('test-execution-aliases', source_command) + + proof_generator_command = env.Command( + target=[ + '${SOURCE}.log', + '${SOURCE}.status', + ], + source=installed[0], + action=SCons.Action.Action( + "$PROOF_GENERATOR_COMMAND", + "$PROOF_GENERATOR_COMSTR" + ), + source_scanner=proof_generator_command_scanner + ) + + # We assume tests are provable by default, but some tests may not + # be. Such tests can be tagged with UNDECIDABLE_TEST=True. If a + # test isn't provable, we disable caching its results and require + # it to be always rebuilt. + if installed[0].env.get('UNDECIDABLE_TEST', False): + env.NoCache(proof_generator_command) + env.AlwaysBuild(proof_generator_command) + + proof_analyzer_command = env.Command( + target='${SOURCES[1].base}.proof', + source=proof_generator_command, + action=SCons.Action.Action( + "$PROOF_ANALYZER_COMMAND", + "$PROOF_ANALYZER_COMSTR" + ) + ) + + proof_analyzer_alias = env.Alias( + f"prove-{target_name}", + proof_analyzer_command, + ) - env.Alias("test-execution-aliases", source_command) + setattr(installed[0].attributes, _associated_proof, proof_analyzer_alias) + # TODO: Should we enable proof at the file level? def exists(env): return True @@ -74,5 +141,29 @@ def generate(env): "TEST_EXECUTION_SUFFIX_DENYLIST", [".in"] ) - # TODO: Remove when the new ninja generator is the only supported generator - env["_NINJA_NO_TEST_EXECUTION"] = True + env.AppendUnique( + AIB_TASKS={ + "prove": (auto_prove_task, False), + } + ) + + # TODO: Should we have some sort of prefix_xdir for the output location for these? Something like + # $PREFIX_VARCACHE and which in our build is pre-populated to $PREFIX/var/cache/mongo or similar? + + if env['PLATFORM'] == 'win32': + env["PROOF_GENERATOR_COMMAND"] = "$( $ICERUN $) ${SOURCES[0]} $UNITTEST_FLAGS > ${TARGETS[0]} 2>&1 & call echo %^errorlevel% > ${TARGETS[1]}" + + # Keeping this here for later, but it only works if cmd.exe is + # launched with /V, and SCons doesn't do that. + # + # env["PROOF_ANALYZER_COMMAND"] = "set /p nextErrorLevel=<${SOURCES[1]} & if \"!nextErrorLevel!\"==\"0 \" (type nul > $TARGET) else (exit 1)" + # + # Instead, use grep! I mean findstr. + env["PROOF_ANALYZER_COMMAND"] = "findstr /B /L 0 ${SOURCES[1]} && (type nul > $TARGET) || (exit 1)" + else: + env["PROOF_GENERATOR_COMMAND"] = "$( $ICERUN $) ${SOURCES[0]} $UNITTEST_FLAGS > ${TARGETS[0]} 2>&1 ; echo $? > ${TARGETS[1]}" + env["PROOF_ANALYZER_COMMAND"] = "if $$(exit $$(cat ${SOURCES[1]})) ; then touch $TARGET ; else exit 1 ; fi" + + # TODO: Condition this on verbosity + env['PROOF_GENERATOR_COMSTR'] = "Running test ${SOURCES[0]}" + env['PROOF_ANALYZER_COMSTR'] = "Analyzing test results in ${SOURCES[1]}" diff --git a/site_scons/site_tools/next/ccache.py b/site_scons/site_tools/next/ccache.py deleted file mode 100644 index de7d03e5472..00000000000 --- a/site_scons/site_tools/next/ccache.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright 2020 MongoDB Inc. 
-# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY -# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# - -import math -import os -import re -import subprocess - -import SCons -from pkg_resources import parse_version - -# This is the oldest version of ccache that offers support for -gsplit-dwarf -_ccache_version_min = parse_version("3.2.3") - - -def exists(env): - """Look for a viable ccache implementation that meets our version requirements.""" - if not env.subst("$CCACHE"): - return False - - ccache = env.WhereIs("$CCACHE") - if not ccache: - print(f"Error: ccache not found at {env['CCACHE']}") - return False - - if 'CCACHE_VERSION' in env and env['CCACHE_VERSION'] >= _ccache_version_min: - return True - - pipe = SCons.Action._subproc( - env, - SCons.Util.CLVar(ccache) + ["--version"], - stdin="devnull", - stderr="devnull", - stdout=subprocess.PIPE, - ) - - if pipe.wait() != 0: - print(f"Error: failed to execute '{env['CCACHE']}'") - return False - - validated = False - for line in pipe.stdout: - line = line.decode("utf-8") - if validated: - continue # consume all data - version_banner = re.search(r"^ccache version", line) - if not version_banner: - continue - ccache_version = re.split("ccache version (.+)", line) - if len(ccache_version) < 2: - continue - ccache_version = parse_version(ccache_version[1]) - if ccache_version >= _ccache_version_min: - validated = True - - if validated: - env['CCACHE_VERSION'] = ccache_version - else: - print(f"Error: failed to verify ccache version >= {_ccache_version_min}, found {ccache_version}") - - return validated - - -def generate(env): - """Add ccache support.""" - - # Absoluteify - env["CCACHE"] = env.WhereIs("$CCACHE") - - # Propagate CCACHE related variables into the command environment - for var, host_value in os.environ.items(): - if var.startswith("CCACHE_"): - env["ENV"][var] = host_value - - # SERVER-48289: Adding roll-your-own CFLAGS and CXXFLAGS can cause some very "weird" issues - # with using icecc and ccache if they turn out not to be supported by the compiler. Rather - # than try to filter each and every flag someone might try for the ones we know don't - # work, we'll just let the compiler ignore them. A better approach might be to pre-filter - # flags coming in from the environment by passing them through the appropriate *IfSupported - # method, but that's a much larger effort. - if env.ToolchainIs("clang"): - env.AppendUnique(CCFLAGS=["-Qunused-arguments"]) - - # Check whether icecream is requested and is a valid tool. 
- if "ICECC" in env: - icecream = SCons.Tool.Tool('icecream') - icecream_enabled = bool(icecream) and icecream.exists(env) - else: - icecream_enabled = False - - # Set up a performant ccache configuration. Here, we don't use a second preprocessor and - # pass preprocessor arguments that deterministically expand source files so a stable - # hash can be calculated on them. This both reduces the amount of work ccache needs to - # do and increases the likelihood of a cache hit. - if env.ToolchainIs("clang"): - env["ENV"].pop("CCACHE_CPP2", None) - env["ENV"]["CCACHE_NOCPP2"] = "1" - env.AppendUnique(CCFLAGS=["-frewrite-includes"]) - elif env.ToolchainIs("gcc"): - if icecream_enabled: - # Newer versions of Icecream will drop -fdirectives-only from - # preprocessor and compiler flags if it does not find a remote - # build host to build on. ccache, on the other hand, will not - # pass the flag to the compiler if CCACHE_NOCPP2=1, but it will - # pass it to the preprocessor. The combination of setting - # CCACHE_NOCPP2=1 and passing the flag can lead to build - # failures. - - # See: https://jira.mongodb.org/browse/SERVER-48443 - # We have an open issue with Icecream and ccache to resolve the - # cause of these build failures. Once the bug is resolved and - # the fix is deployed, we can remove this entire conditional - # branch and make it like the one for clang. - # TODO: https://github.com/icecc/icecream/issues/550 - env["ENV"].pop("CCACHE_CPP2", None) - env["ENV"]["CCACHE_NOCPP2"] = "1" - else: - env["ENV"].pop("CCACHE_NOCPP2", None) - env["ENV"]["CCACHE_CPP2"] = "1" - env.AppendUnique(CCFLAGS=["-fdirectives-only"]) - - # Ensure ccache accounts for any extra files in use that affects the generated object - # file. This can be used for situations where a file is passed as an argument to a - # compiler parameter and differences in the file need to be accounted for in the - # hash result to prevent erroneous cache hits. - if "CCACHE_EXTRAFILES" in env and env["CCACHE_EXTRAFILES"]: - env["ENV"]["CCACHE_EXTRAFILES"] = ":".join([ - denyfile.path - for denyfile in env["CCACHE_EXTRAFILES"] - ]) - - # Make a generator to expand to CCACHE in the case where we are - # not a conftest. We don't want to use ccache for configure tests - # because we don't want to use icecream for configure tests, but - # when icecream and ccache are combined we can't easily filter out - # configure tests for icecream since in that combination we use - # CCACHE_PREFIX to express the icecc tool, and at that point it is - # too late for us to meaningfully filter out conftests. So we just - # disable ccache for conftests entirely. Which feels safer - # somehow anyway. - def ccache_generator(target, source, env, for_signature): - if "conftest" not in str(target[0]): - return '$CCACHE' - return '' - env['CCACHE_GENERATOR'] = ccache_generator - - # Add ccache to the relevant command lines. Wrap the reference to - # ccache in the $( $) pattern so that turning ccache on or off - # doesn't invalidate your build. - env["CCCOM"] = "$( $CCACHE_GENERATOR $)" + env["CCCOM"] - env["CXXCOM"] = "$( $CCACHE_GENERATOR $)" + env["CXXCOM"] - env["SHCCCOM"] = "$( $CCACHE_GENERATOR $)" + env["SHCCCOM"] - env["SHCXXCOM"] = "$( $CCACHE_GENERATOR $)" + env["SHCXXCOM"] diff --git a/site_scons/site_tools/next/icecream.py b/site_scons/site_tools/next/icecream.py deleted file mode 100644 index 4dadefe32ba..00000000000 --- a/site_scons/site_tools/next/icecream.py +++ /dev/null @@ -1,585 +0,0 @@ -# Copyright 2020 MongoDB Inc. 
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
-# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
-# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-import os
-import re
-import subprocess
-import urllib
-
-from pkg_resources import parse_version
-
-import SCons
-
-_icecream_version_min = parse_version("1.1rc2")
-_icecream_version_gcc_remote_cpp = parse_version("1.2")
-
-
-# I'd prefer to use Value here, but amazingly, its __str__ returns the
-# *initial* value of the Value and not the built value, if
-# available. That seems like a bug. In the meantime, make our own very
-# simple Substitution thing.
-class _BoundSubstitution:
-    def __init__(self, env, expression):
-        self.env = env
-        self.expression = expression
-        self.result = None
-
-    def __str__(self):
-        if self.result is None:
-            self.result = self.env.subst(self.expression)
-        return self.result
-
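The class defers env.subst until the value is actually used: construction only records the expression, and the first str() both performs and memoizes the substitution. A minimal usage sketch:

    # Sketch only: nothing is substituted at construction time...
    bound = _BoundSubstitution(env, '$ICECREAM_TARGET_DIR/run-icecc.sh')
    # ...the expansion happens (once) here, on first stringification.
    print(str(bound))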
-
-def icecc_create_env(env, target, source, for_signature):
-    # Safe to assume unix here because icecream only works on Unix
-    mkdir = "mkdir -p ${TARGET.dir}"
-
-    # Create the env, use awk to get just the tarball name and store it in
-    # the shell variable $ICECC_VERSION_TMP so the subsequent mv command can
-    # store it in a known location. Add any files requested from the user environment.
-    create_env = "ICECC_VERSION_TMP=$$(${SOURCES[0]} --$ICECC_COMPILER_TYPE ${SOURCES[1]} ${SOURCES[2]}"
-
-    # TODO: It would be a little more elegant if things in
-    # ICECC_CREATE_ENV_ADDFILES were handled as sources, because we
-    # would get automatic dependency tracking. However, there are some
-    # wrinkles around the mapped case so we have opted to leave it as
-    # just interpreting the env for now.
-    for addfile in env.get('ICECC_CREATE_ENV_ADDFILES', []):
-        if isinstance(addfile, tuple):
-            if len(addfile) == 2:
-                if env['ICECREAM_VERSION'] > parse_version('1.1'):
-                    raise Exception("This version of icecream does not support addfile remapping.")
-                create_env += " --addfile {}={}".format(
-                    env.File(addfile[0]).srcnode().abspath,
-                    env.File(addfile[1]).srcnode().abspath)
-                env.Depends(target, addfile[1])
-            else:
-                raise Exception(f"Found incorrect icecream addfile format: {str(addfile)}" +
-                                f"\ntuple must have two elements of the form" +
-                                f"\n('chroot dest path', 'source file path')")
-        else:
-            try:
-                create_env += f" --addfile {env.File(addfile).srcnode().abspath}"
-                env.Depends(target, addfile)
-            except:
-                # NOTE: abspath is required by icecream because of
-                # this line in icecc-create-env:
-                # https://github.com/icecc/icecream/blob/10b9468f5bd30a0fdb058901e91e7a29f1bfbd42/client/icecc-create-env.in#L534
-                # which cuts out the two files based off the equals sign and
-                # starting slash of the second file
-                raise Exception(f"Found incorrect icecream addfile format: {type(addfile)}" +
-                                f"\nvalue provided cannot be converted to a file path")
-
-    create_env += " | awk '/^creating .*\\.tar\\.gz/ { print $$2 }')"
-
-    # Simply move our tarball to the expected location.
-    mv = "mv $$ICECC_VERSION_TMP $TARGET"
-
-    # Daisy chain the commands and then let SCons Subst in the rest.
-    cmdline = f"{mkdir} && {create_env} && {mv}"
-    return cmdline
-
-
-def generate(env):
-    # icecc lower than 1.1 supports addfile remapping accidentally,
-    # and above it adds an empty cpuinfo, so handle cpuinfo issues for icecream
-    # below version 1.1
-    if (env['ICECREAM_VERSION'] <= parse_version('1.1')
-        and env.ToolchainIs("clang")
-        and os.path.exists('/proc/cpuinfo')):
-        env.AppendUnique(ICECC_CREATE_ENV_ADDFILES=[('/proc/cpuinfo', '/dev/null')])
-
-    # Absoluteify, so we can derive ICERUN
-    env["ICECC"] = env.WhereIs("$ICECC")
-
-    if "ICERUN" in env:
-        # Absoluteify, for parity with ICECC
-        icerun = env.WhereIs("$ICERUN")
-    else:
-        icerun = env.File("$ICECC").File("icerun")
-    env["ICERUN"] = icerun
-
-    if "ICECC_CREATE_ENV" in env:
-        icecc_create_env_bin = env.WhereIs("$ICECC_CREATE_ENV")
-    else:
-        icecc_create_env_bin = env.File("ICECC").File("icecc-create-env")
-    env["ICECC_CREATE_ENV"] = icecc_create_env_bin
-
-    # Make CC and CXX absolute paths too. This ensures the correct paths to
-    # compilers get passed to icecc-create-env rather than letting it
-    # potentially discover something we don't expect via PATH.
-    env["CC"] = env.WhereIs("$CC")
-    env["CXX"] = env.WhereIs("$CXX")
-
-    # Set up defaults for configuration options
-    env['ICECREAM_TARGET_DIR'] = env.Dir(
-        env.get('ICECREAM_TARGET_DIR', '#./.icecream')
-    )
-    verbose = env.get('ICECREAM_VERBOSE', False)
-    env['ICECREAM_DEBUG'] = env.get('ICECREAM_DEBUG', False)
-
-    # We have a lot of things to build and run that the final user
-    # environment doesn't need to see or know about. Make a custom env
-    # that we use consistently from here to where we end up setting
-    # ICECREAM_RUN_ICECC in the user env.
-    setupEnv = env.Clone(
-        NINJA_SKIP=True
-    )
-
-    if 'ICECC_VERSION' in setupEnv and bool(setupEnv['ICECC_VERSION']):
-
-        if setupEnv["ICECC_VERSION"].startswith("http"):
-
-            quoted = urllib.parse.quote(setupEnv['ICECC_VERSION'], safe=[])
-
-            # Use curl / wget to download the toolchain because SCons (and ninja)
-            # are better at running shell commands than Python functions.
-            #
-            # TODO: This all happens SCons side now. Should we just use python to fetch instead?
-            curl = setupEnv.WhereIs("curl")
-            wget = setupEnv.WhereIs("wget")
-
-            if curl:
-                cmdstr = "curl -L"
-            elif wget:
-                cmdstr = "wget"
-            else:
-                raise Exception(
-                    "You have specified an ICECC_VERSION that is a URL but you have neither wget nor curl installed."
-                )
-
-            # Copy ICECC_VERSION into ICECC_VERSION_URL so that we can
-            # change ICECC_VERSION without perturbing the effect of
-            # the action.
-            setupEnv['ICECC_VERSION_URL'] = setupEnv['ICECC_VERSION']
-            setupEnv['ICECC_VERSION'] = icecc_version_file = setupEnv.Command(
-                target=f"$ICECREAM_TARGET_DIR/{quoted}",
-                source=[setupEnv.Value(quoted)],
-                action=SCons.Action.Action(
-                    f"{cmdstr} -o $TARGET $ICECC_VERSION_URL",
-                    "Downloading compiler package from $ICECC_VERSION_URL" if not verbose else str(),
-                ),
-            )[0]
-
-        else:
-            # Convert the user's selection into a File node and do some basic validation
-            setupEnv['ICECC_VERSION'] = icecc_version_file = setupEnv.File('$ICECC_VERSION')
-
-            if not icecc_version_file.exists():
-                raise Exception(
-                    'The ICECC_VERSION variable is set to {}, but this file does not exist'.format(icecc_version_file)
-                )
-
-        # This is what we are going to call the file names as known to SCons on disk
-        setupEnv["ICECC_VERSION_ID"] = "user_provided." + icecc_version_file.name
-
-    else:
-
-        setupEnv["ICECC_COMPILER_TYPE"] = setupEnv.get(
-            "ICECC_COMPILER_TYPE", os.path.basename(setupEnv.WhereIs("${CC}"))
-        )
-
-        # This is what we are going to call the file names as known to SCons on disk. We do the
-        # subst early so that we can call `replace` on the result.
-        setupEnv["ICECC_VERSION_ID"] = setupEnv.subst(
-            "icecc-create-env.${CC}${CXX}.tar.gz").replace("/", "_"
-        )
-
-        setupEnv["ICECC_VERSION"] = icecc_version_file = setupEnv.Command(
-            target="$ICECREAM_TARGET_DIR/$ICECC_VERSION_ID",
-            source=[
-                "$ICECC_CREATE_ENV",
-                "$CC",
-                "$CXX"
-            ],
-            action=SCons.Action.Action(
-                icecc_create_env,
-                "Generating icecream compiler package: $TARGET" if not verbose else str(),
-                generator=True,
-            )
-        )[0]
-
-    # At this point, all paths above have produced a file of some sort. We now move on
-    # to producing our own signature for this local file.
-
-    setupEnv.Append(
-        ICECREAM_TARGET_BASE_DIR='$ICECREAM_TARGET_DIR',
-        ICECREAM_TARGET_BASE_FILE='$ICECC_VERSION_ID',
-        ICECREAM_TARGET_BASE='$ICECREAM_TARGET_BASE_DIR/$ICECREAM_TARGET_BASE_FILE',
-    )
-
-    # If the file we are planning to use is not within
-    # ICECREAM_TARGET_DIR then make a local copy of it that is.
-    if icecc_version_file.dir != env['ICECREAM_TARGET_DIR']:
-        setupEnv["ICECC_VERSION"] = icecc_version_file = setupEnv.Command(
-            target=[
-                '${ICECREAM_TARGET_BASE}.local',
-            ],
-            source=icecc_version_file,
-            action=SCons.Defaults.Copy('$TARGET', '$SOURCE'),
-        )
-
-        # There is no point caching the copy.
-        setupEnv.NoCache(icecc_version_file)
-
-    # Now, we compute our own signature of the local compiler package,
-    # and create yet another link to the compiler package with a name
-    # containing our computed signature. Now we know that we can give
-    # this filename to icecc and it will be assured to really reflect
-    # the contents of the package, and not the arbitrary naming of the
-    # file as found on the user's filesystem or from
-    # icecc-create-env. We put the absolute path to that filename into
-    # a file that we can read from.
- icecc_version_info = setupEnv.File(setupEnv.Command( - target=[ - '${ICECREAM_TARGET_BASE}.sha256', - '${ICECREAM_TARGET_BASE}.sha256.path', - ], - source=icecc_version_file, - action=SCons.Action.ListAction( - [ - - # icecc-create-env run twice with the same input will - # create files with identical contents, and identical - # filenames, but with different hashes because it - # includes timestamps. So we compute a new hash based - # on the actual stream contents of the file by - # untarring it into shasum. - SCons.Action.Action( - "tar xfO ${SOURCES[0]} | shasum -b -a 256 - | awk '{ print $1 }' > ${TARGETS[0]}", - "Calculating sha256 sum of ${SOURCES[0]}" if not verbose else str(), - ), - - SCons.Action.Action( - "ln -f ${SOURCES[0]} ${TARGETS[0].dir}/icecream_py_sha256_$$(cat ${TARGETS[0]}).tar.gz", - "Linking ${SOURCES[0]} to its sha256 sum name" if not verbose else str(), - ), - - SCons.Action.Action( - "echo ${TARGETS[0].dir.abspath}/icecream_py_sha256_$$(cat ${TARGETS[0]}).tar.gz > ${TARGETS[1]}", - "Storing sha256 sum name for ${SOURCES[0]} to ${TARGETS[1]}" if not verbose else str(), - ) - ], - ) - )) - - # We can't allow these to interact with the cache because the - # second action produces a file unknown to SCons. If caching were - # permitted, the other two files could be retrieved from cache but - # the file produced by the second action could not (and would not) - # be. We would end up with a broken setup. - setupEnv.NoCache(icecc_version_info) - - # Create a value node that, when built, contains the result of - # reading the contents of the sha256.path file. This way we can - # pull the value out of the file and substitute it into our - # wrapper script. - icecc_version_string_value = setupEnv.Command( - target=setupEnv.Value(None), - source=[ - icecc_version_info[1] - ], - action=SCons.Action.Action( - lambda env, target, source: target[0].write(source[0].get_text_contents()), - "Reading compiler package sha256 sum path from $SOURCE" if not verbose else str(), - ) - )[0] - - def icecc_version_string_generator(source, target, env, for_signature): - if for_signature: - return icecc_version_string_value.get_csig() - return icecc_version_string_value.read() - - # Set the values that will be interpolated into the run-icecc script. - setupEnv['ICECC_VERSION'] = icecc_version_string_generator - - # If necessary, we include the users desired architecture in the - # interpolated file. - icecc_version_arch_string = str() - if "ICECC_VERSION_ARCH" in setupEnv: - icecc_version_arch_string = "${ICECC_VERSION_ARCH}:" - - # Finally, create the run-icecc wrapper script. The contents will - # re-invoke icecc with our sha256 sum named file, ensuring that we - # trust the signature to be appropriate. In a pure SCons build, we - # actually wouldn't need this Substfile, we could just set - # env['ENV]['ICECC_VERSION'] to the Value node above. But that - # won't work for Ninja builds where we can't ask for the contents - # of such a node easily. Creating a Substfile means that SCons - # will take care of generating a file that Ninja can use. 
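(Aside: the Substfile mechanism described above, reduced to a minimal SConstruct sketch. The target name, token, and tool path are illustrative; the point is that SCons materializes a real file with the @tokens@ replaced, which Ninja can then treat as an ordinary input.)

    # Minimal SConstruct sketch of the Textfile/Substfile pattern (names made up).
    env = Environment(tools=["default", "textfile"])
    env.Textfile(
        target="run-tool.sh",
        source=['#!/bin/sh', 'exec @tool@ "$@"', ''],
        SUBST_DICT={"@tool@": "/usr/bin/icecc"},  # hypothetical absolute path
        TEXTFILEPREFIX="",
        TEXTFILESUFFIX="",
    )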
- run_icecc = setupEnv.Textfile( - target="$ICECREAM_TARGET_DIR/run-icecc.sh", - source=[ - '#!/bin/sh', - 'ICECC_VERSION=@icecc_version_arch@@icecc_version@ exec @icecc@ "$@"', - '', - ], - SUBST_DICT={ - '@icecc@' : '$ICECC', - '@icecc_version@' : '$ICECC_VERSION', - '@icecc_version_arch@' : icecc_version_arch_string, - }, - - # Don't change around the suffixes - TEXTFILEPREFIX=str(), - TEXTFILESUFFIX=str(), - - # Somewhat surprising, but even though Ninja will defer to - # SCons to invoke this, we still need ninja to be aware of it - # so that it knows to invoke SCons to produce it as part of - # TEMPLATE expansion. Since we have set NINJA_SKIP=True for - # setupEnv, we need to reverse that here. - NINJA_SKIP=False - ) - - setupEnv.AddPostAction( - run_icecc, - action=SCons.Defaults.Chmod('$TARGET', "u+x"), - ) - - setupEnv.Depends( - target=run_icecc, - dependency=[ - - # TODO: Without the ICECC dependency, changing ICECC doesn't cause the Substfile - # to regenerate. Why is this? - '$ICECC', - - # This dependency is necessary so that we build into this - # string before we create the file. - icecc_version_string_value, - - # TODO: SERVER-50587 We need to make explicit depends here because of NINJA_SKIP. Any - # dependencies in the nodes created in setupEnv with NINJA_SKIP would have - # that dependency chain hidden from ninja, so they won't be rebuilt unless - # added as dependencies here on this node that has NINJA_SKIP=False. - '$CC', - '$CXX', - icecc_version_file, - ], - ) - - # From here out, we make changes to the users `env`. - setupEnv = None - - env['ICECREAM_RUN_ICECC'] = run_icecc[0] - - def icecc_toolchain_dependency_emitter(target, source, env): - if "conftest" not in str(target[0]): - # Requires or Depends? There are trade-offs: - # - # If it is `Depends`, then enabling or disabling icecream - # will cause a global recompile. But, if you regenerate a - # new compiler package, you will get a rebuild. If it is - # `Requires`, then enabling or disabling icecream will not - # necessarily cause a global recompile (it depends if - # C[,C,XX]FLAGS get changed when you do so), but on the - # other hand if you regenerate a new compiler package you - # will *not* get a rebuild. - # - # For now, we are opting for `Requires`, because it seems - # preferable that opting in or out of icecream shouldn't - # force a rebuild. - env.Requires(target, "$ICECREAM_RUN_ICECC") - return target, source - - # Cribbed from Tool/cc.py and Tool/c++.py. It would be better if - # we could obtain this from SCons. - _CSuffixes = [".c"] - if not SCons.Util.case_sensitive_suffixes(".c", ".C"): - _CSuffixes.append(".C") - - _CXXSuffixes = [".cpp", ".cc", ".cxx", ".c++", ".C++"] - if SCons.Util.case_sensitive_suffixes(".c", ".C"): - _CXXSuffixes.append(".C") - - suffixes = _CSuffixes + _CXXSuffixes - for object_builder in SCons.Tool.createObjBuilders(env): - emitterdict = object_builder.builder.emitter - for suffix in emitterdict.keys(): - if not suffix in suffixes: - continue - base = emitterdict[suffix] - emitterdict[suffix] = SCons.Builder.ListEmitter( - [base, icecc_toolchain_dependency_emitter] - ) - - # Check whether ccache is requested and is a valid tool. 
- if "CCACHE" in env: - ccache = SCons.Tool.Tool('ccache') - ccache_enabled = bool(ccache) and ccache.exists(env) - else: - ccache_enabled = False - - if env.ToolchainIs("clang"): - env["ENV"]["ICECC_CLANG_REMOTE_CPP"] = 1 - elif env.ToolchainIs("gcc"): - if env["ICECREAM_VERSION"] < _icecream_version_gcc_remote_cpp: - # We aren't going to use ICECC_REMOTE_CPP because icecc - # 1.1 doesn't offer it. We disallow fallback to local - # builds because the fallback is serial execution. - env["ENV"]["ICECC_CARET_WORKAROUND"] = 0 - elif not ccache_enabled: - # If we can, we should make Icecream do its own preprocessing - # to reduce concurrency on the local host. We should not do - # this when ccache is in use because ccache will execute - # Icecream to do its own preprocessing and then execute - # Icecream as the compiler on the preprocessed source. - env["ENV"]["ICECC_REMOTE_CPP"] = 1 - - if "ICECC_SCHEDULER" in env: - env["ENV"]["USE_SCHEDULER"] = env["ICECC_SCHEDULER"] - - # If ccache is in play we actually want the icecc binary in the - # CCACHE_PREFIX environment variable, not on the command line, per - # the ccache documentation on compiler wrappers. Otherwise, just - # put $ICECC on the command line. We wrap it in the magic "don't - # consider this part of the build signature" sigils in the hope - # that enabling and disabling icecream won't cause rebuilds. This - # is unlikely to really work, since above we have maybe changed - # compiler flags (things like -fdirectives-only), but we still try - # to do the right thing. - if ccache_enabled: - # If the path to CCACHE_PREFIX isn't absolute, then it will - # look it up in PATH. That isn't what we want here, we make - # the path absolute. - env['ENV']['CCACHE_PREFIX'] = _BoundSubstitution(env, "${ICECREAM_RUN_ICECC.abspath}") - else: - # Make a generator to expand to ICECC in the case where we are - # not a conftest. We never want to run conftests remotely. - # Ideally, we would do this for the CCACHE_PREFIX case above, - # but unfortunately if we did we would never actually see the - # conftests, because the BoundSubst means that we will never - # have a meaningful `target` variable when we are in ENV. - # Instead, rely on the ccache.py tool to do it's own filtering - # out of conftests. - def icecc_generator(target, source, env, for_signature): - if "conftest" not in str(target[0]): - return '$ICECREAM_RUN_ICECC' - return '' - env['ICECC_GENERATOR'] = icecc_generator - - icecc_string = "$( $ICECC_GENERATOR $)" - env["CCCOM"] = " ".join([icecc_string, env["CCCOM"]]) - env["CXXCOM"] = " ".join([icecc_string, env["CXXCOM"]]) - env["SHCCCOM"] = " ".join([icecc_string, env["SHCCCOM"]]) - env["SHCXXCOM"] = " ".join([icecc_string, env["SHCXXCOM"]]) - - # Make common non-compile jobs flow through icerun so we don't - # kill the local machine. It would be nice to plumb ICERUN in via - # SPAWN or SHELL but it is too much. You end up running `icerun - # icecc ...`, and icecream doesn't handle that. We could try to - # filter and only apply icerun if icecc wasn't present but that - # seems fragile. If you find your local machine being overrun by - # jobs, figure out what sort they are and extend this part of the - # setup. 
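(Aside: the `$( ... $)` sigils wrapped around these generators are SCons's signature-escape markers: text between them is expanded into the command line but excluded from the action's build signature. A minimal illustration with made-up targets:)

    # Toggling WRAP changes the executed command but not the stored build
    # signature, so flipping it on or off does not by itself force a rebuild.
    env = Environment()
    env["WRAP"] = "icerun"  # or "" to disable the wrapper
    env.Command("out.txt", "in.txt", "$( $WRAP $) cp $SOURCE $TARGET")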
-    def icerun_generator(target, source, env, for_signature):
-        if "conftest" not in str(target[0]):
-            return '$ICERUN'
-        return ''
-    env['ICERUN_GENERATOR'] = icerun_generator
-
-    icerun_commands = [
-        "ARCOM",
-        "LINKCOM",
-        "PYTHON",
-        "SHLINKCOM",
-    ]
-
-    for command in icerun_commands:
-        if command in env:
-            env[command] = " ".join(["$( $ICERUN_GENERATOR $)", env[command]])
-
-    # Set ICECREAM_DEBUG=True to debug your icecc integration
-    if env['ICECREAM_DEBUG']:
-        env['ENV']['ICECC_DEBUG'] = 'debug'
-        env['ENV']['ICECC_LOGFILE'] = 'icecc.log'
-
-
-def exists(env):
-    if not env.subst("$ICECC"):
-        return False
-
-    icecc = env.WhereIs("$ICECC")
-    if not icecc:
-        # TODO: We should not be printing here because we don't always know the
-        # use case for loading this tool. It may be that the user desires
-        # writing this output to a log file or not even displaying it at all.
-        # We should instead be invoking a callback to SConstruct that it can
-        # interpret as needed. Or better yet, we should use some SCons logging
-        # and error API, if and when one should emerge.
-        print(f"Error: icecc not found at {env['ICECC']}")
-        return False
-
-    if 'ICECREAM_VERSION' in env and env['ICECREAM_VERSION'] >= _icecream_version_min:
-        return True
-
-    pipe = SCons.Action._subproc(
-        env,
-        SCons.Util.CLVar(icecc) + ["--version"],
-        stdin="devnull",
-        stderr="devnull",
-        stdout=subprocess.PIPE,
-    )
-
-    if pipe.wait() != 0:
-        print(f"Error: failed to execute '{env['ICECC']}'")
-        return False
-
-    validated = False
-
-    if "ICERUN" in env:
-        # Absoluteify, for parity with ICECC
-        icerun = env.WhereIs("$ICERUN")
-    else:
-        icerun = env.File("$ICECC").File("icerun")
-    if not icerun:
-        print(f"Error: the icerun wrapper does not exist at {icerun} as expected")
-
-    if "ICECC_CREATE_ENV" in env:
-        icecc_create_env_bin = env.WhereIs("$ICECC_CREATE_ENV")
-    else:
-        icecc_create_env_bin = env.File("$ICECC").File("icecc-create-env")
-    if not icecc_create_env_bin:
-        print(f"Error: the icecc-create-env utility does not exist at {icecc_create_env_bin} as expected")
-
-    for line in pipe.stdout:
-        line = line.decode("utf-8")
-        if validated:
-            continue  # consume all data
-        version_banner = re.search(r"^ICECC ", line)
-        if not version_banner:
-            continue
-        icecc_version = re.split("ICECC (.+)", line)
-        if len(icecc_version) < 2:
-            continue
-        icecc_version = parse_version(icecc_version[1])
-        if icecc_version >= _icecream_version_min:
-            validated = True
-
-    if validated:
-        env['ICECREAM_VERSION'] = icecc_version
-    else:
-        print(f"Error: failed to verify icecream version >= {_icecream_version_min}, found {icecc_version}")
-
-    return validated
diff --git a/site_scons/site_tools/next/mongo_test_execution.py b/site_scons/site_tools/next/mongo_test_execution.py
deleted file mode 100644
index 2433e446c8c..00000000000
--- a/site_scons/site_tools/next/mongo_test_execution.py
+++ /dev/null
@@ -1,169 +0,0 @@
-# Copyright 2020 MongoDB Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY -# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# - -import os - -import SCons - -import auto_install_binaries - -_proof_scanner_cache_key = "proof_scanner_cache" -_associated_proof = "associated_proof_key" - -def proof_generator_command_scanner_func(node, env, path): - results = getattr(node.attributes, _proof_scanner_cache_key, None) - if results is not None: - return results - results = env.GetTransitivelyInstalledFiles(node) - setattr(node.attributes, _proof_scanner_cache_key, results) - return results - -proof_generator_command_scanner = SCons.Scanner.Scanner( - function=proof_generator_command_scanner_func, - path_function=None, - recursive=True -) - -def auto_prove_task(env, component, role): - entry = auto_install_binaries.get_alias_map_entry(env, component, role) - return [ - getattr(f.attributes, _associated_proof) - for f in entry.files - if hasattr(f.attributes, _associated_proof) - ] - -def generate_test_execution_aliases(env, test): - installed = [test] - if env.get("AUTO_INSTALL_ENABLED", False) and env.GetAutoInstalledFiles(test): - installed = env.GetAutoInstalledFiles(test) - - target_name = os.path.basename(installed[0].path) - - target_command = env.Command( - target=f"#+{target_name}", - source=installed[0], - action="$( $ICERUN $) ${SOURCES[0]} $UNITTEST_FLAGS", - NINJA_POOL="console", - ) - env.Pseudo(target_command) - env.Alias("test-execution-aliases", target_command) - - for source in test.sources: - source_base_name = os.path.basename(source.get_path()) - # Strip suffix - dot_idx = source_base_name.rfind(".") - suffix = source_base_name[dot_idx:] - if suffix in env["TEST_EXECUTION_SUFFIX_DENYLIST"]: - continue - - source_name = source_base_name[:dot_idx] - if target_name == source_name: - continue - - source_command = env.Command( - target=f"#+{source_name}", - source=installed[0], - action="$( $ICERUN $) ${SOURCES[0]} -fileNameFilter $TEST_SOURCE_FILE_NAME $UNITTEST_FLAGS", - TEST_SOURCE_FILE_NAME=source_name, - NINJA_POOL="console", - ) - env.Pseudo(source_command) - env.Alias('test-execution-aliases', source_command) - - proof_generator_command = env.Command( - target=[ - '${SOURCE}.log', - '${SOURCE}.status', - ], - source=installed[0], - action=SCons.Action.Action( - "$PROOF_GENERATOR_COMMAND", - "$PROOF_GENERATOR_COMSTR" - ), - source_scanner=proof_generator_command_scanner - ) - - # We assume tests are provable by default, but some tests may not - # be. Such tests can be tagged with UNDECIDABLE_TEST=True. If a - # test isn't provable, we disable caching its results and require - # it to be always rebuilt. 
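(Aside: a sketch of how a SConscript would use the UNDECIDABLE_TEST tag described above. The builder and test names here are hypothetical.)

    # Hypothetical SConscript: this test is nondeterministic, so never cache
    # its proof results and always re-run it.
    env.CppUnitTest("flaky_widget_test", ["flaky_widget_test.cpp"],
                    UNDECIDABLE_TEST=True)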
- if installed[0].env.get('UNDECIDABLE_TEST', False): - env.NoCache(proof_generator_command) - env.AlwaysBuild(proof_generator_command) - - proof_analyzer_command = env.Command( - target='${SOURCES[1].base}.proof', - source=proof_generator_command, - action=SCons.Action.Action( - "$PROOF_ANALYZER_COMMAND", - "$PROOF_ANALYZER_COMSTR" - ) - ) - - proof_analyzer_alias = env.Alias( - f"prove-{target_name}", - proof_analyzer_command, - ) - - setattr(installed[0].attributes, _associated_proof, proof_analyzer_alias) - - # TODO: Should we enable proof at the file level? - -def exists(env): - return True - - -def generate(env): - # Used for Ninja generator to collect the test execution aliases - env.Alias("test-execution-aliases") - env.AddMethod(generate_test_execution_aliases, "GenerateTestExecutionAliases") - - env["TEST_EXECUTION_SUFFIX_DENYLIST"] = env.get( - "TEST_EXECUTION_SUFFIX_DENYLIST", [".in"] - ) - - env.AppendUnique( - AIB_TASKS={ - "prove": (auto_prove_task, False), - } - ) - - # TODO: Should we have some sort of prefix_xdir for the output location for these? Something like - # $PREFIX_VARCACHE and which in our build is pre-populated to $PREFIX/var/cache/mongo or similar? - - if env['PLATFORM'] == 'win32': - env["PROOF_GENERATOR_COMMAND"] = "$( $ICERUN $) ${SOURCES[0]} $UNITTEST_FLAGS > ${TARGETS[0]} 2>&1 & call echo %^errorlevel% > ${TARGETS[1]}" - - # Keeping this here for later, but it only works if cmd.exe is - # launched with /V, and SCons doesn't do that. - # - # env["PROOF_ANALYZER_COMMAND"] = "set /p nextErrorLevel=<${SOURCES[1]} & if \"!nextErrorLevel!\"==\"0 \" (type nul > $TARGET) else (exit 1)" - # - # Instead, use grep! I mean findstr. - env["PROOF_ANALYZER_COMMAND"] = "findstr /B /L 0 ${SOURCES[1]} && (type nul > $TARGET) || (exit 1)" - else: - env["PROOF_GENERATOR_COMMAND"] = "$( $ICERUN $) ${SOURCES[0]} $UNITTEST_FLAGS > ${TARGETS[0]} 2>&1 ; echo $? > ${TARGETS[1]}" - env["PROOF_ANALYZER_COMMAND"] = "if $$(exit $$(cat ${SOURCES[1]})) ; then touch $TARGET ; else exit 1 ; fi" - - # TODO: Condition this on verbosity - env['PROOF_GENERATOR_COMSTR'] = "Running test ${SOURCES[0]}" - env['PROOF_ANALYZER_COMSTR'] = "Analyzing test results in ${SOURCES[1]}" diff --git a/site_scons/site_tools/next/ninja.py b/site_scons/site_tools/next/ninja.py deleted file mode 100644 index 8961764d08b..00000000000 --- a/site_scons/site_tools/next/ninja.py +++ /dev/null @@ -1,1684 +0,0 @@ -# Copyright 2020 MongoDB Inc. -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY -# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-# - -"""Generate build.ninja files from SCons aliases.""" - -import sys -import os -import importlib -import io -import shutil -import shlex -import textwrap - -from glob import glob -from os.path import join as joinpath -from os.path import splitext - -import SCons -from SCons.Action import _string_from_cmd_list, get_default_ENV -from SCons.Util import is_List, flatten_sequence -from SCons.Script import COMMAND_LINE_TARGETS - -NINJA_STATE = None -NINJA_SYNTAX = "NINJA_SYNTAX" -NINJA_RULES = "__NINJA_CUSTOM_RULES" -NINJA_POOLS = "__NINJA_CUSTOM_POOLS" -NINJA_CUSTOM_HANDLERS = "__NINJA_CUSTOM_HANDLERS" -NINJA_BUILD = "NINJA_BUILD" -NINJA_WHEREIS_MEMO = {} -NINJA_STAT_MEMO = {} - -__NINJA_RULE_MAPPING = {} - -# These are the types that get_command can do something with -COMMAND_TYPES = ( - SCons.Action.CommandAction, - SCons.Action.CommandGeneratorAction, -) - - -def _install_action_function(_env, node): - """Install files using the install or copy commands""" - return { - "outputs": get_outputs(node), - "rule": "INSTALL", - "inputs": [get_path(src_file(s)) for s in node.sources], - "implicit": get_dependencies(node), - } - - -def _mkdir_action_function(env, node): - return { - "outputs": get_outputs(node), - "rule": "CMD", - # implicit explicitly omitted, we translate these so they can be - # used by anything that depends on these but commonly this is - # hit with a node that will depend on all of the fake - # srcnode's that SCons will never give us a rule for leading - # to an invalid ninja file. - "variables": { - # On Windows mkdir "-p" is always on - "cmd": "{mkdir} $out".format( - mkdir="mkdir" if env["PLATFORM"] == "win32" else "mkdir -p", - ), - }, - } - - -def _lib_symlink_action_function(_env, node): - """Create shared object symlinks if any need to be created""" - symlinks = getattr(getattr(node, "attributes", None), "shliblinks", None) - - if not symlinks or symlinks is None: - return None - - outputs = [link.get_dir().rel_path(linktgt) for link, linktgt in symlinks] - inputs = [link.get_path() for link, _ in symlinks] - - return { - "outputs": outputs, - "inputs": inputs, - "rule": "SYMLINK", - "implicit": get_dependencies(node), - } - - -def is_valid_dependent_node(node): - """ - Return True if node is not an alias or is an alias that has children - - This prevents us from making phony targets that depend on other - phony targets that will never have an associated ninja build - target. - - We also have to specify that it's an alias when doing the builder - check because some nodes (like src files) won't have builders but - are valid implicit dependencies. 
- """ - if isinstance(node, SCons.Node.Alias.Alias): - return node.children() - - if not node.env: - return True - - return not node.env.get("NINJA_SKIP") - - -def alias_to_ninja_build(node): - """Convert an Alias node into a Ninja phony target""" - return { - "outputs": get_outputs(node), - "rule": "phony", - "implicit": [ - get_path(src_file(n)) for n in node.children() if is_valid_dependent_node(n) - ], - } - - -def get_order_only(node): - """Return a list of order only dependencies for node.""" - if node.prerequisites is None: - return [] - return [get_path(src_file(prereq)) for prereq in node.prerequisites if is_valid_dependent_node(prereq)] - - -def get_dependencies(node, skip_sources=False): - """Return a list of dependencies for node.""" - if skip_sources: - return [ - get_path(src_file(child)) - for child in node.children() - if child not in node.sources and is_valid_dependent_node(child) - ] - return [get_path(src_file(child)) for child in node.children() if is_valid_dependent_node(child)] - - -def get_inputs(node, skip_unknown_types=False): - """ - Collect the Ninja inputs for node. - - If the given node has inputs which can not be converted into something - Ninja can process, this will throw an exception. Optionally, those nodes - that are not processable can be skipped as inputs with the - skip_unknown_types keyword arg. - """ - executor = node.get_executor() - if executor is not None: - inputs = executor.get_all_sources() - else: - inputs = node.sources - - # Some Nodes (e.g. Python.Value Nodes) won't have files associated. We allow these to be - # optionally skipped to enable the case where we will re-invoke SCons for things - # like TEMPLATE. Otherwise, we have no direct way to express the behavior for such - # Nodes in Ninja, so we raise a hard error - ninja_nodes = [] - for input_node in inputs: - if isinstance(input_node, (SCons.Node.FS.Base, SCons.Node.Alias.Alias)): - ninja_nodes.append(input_node) - else: - if skip_unknown_types: - continue - raise Exception("Can't process {} node '{}' as an input for '{}'".format( - type(input_node), - str(input_node), - str(node))) - - # convert node items into raw paths/aliases for ninja - return [get_path(src_file(o)) for o in ninja_nodes] - - -def get_outputs(node): - """Collect the Ninja outputs for node.""" - executor = node.get_executor() - if executor is not None: - outputs = executor.get_all_targets() - else: - if hasattr(node, "target_peers"): - outputs = node.target_peers - else: - outputs = [node] - - outputs = [get_path(o) for o in outputs] - - return outputs - -def generate_depfile(env, node, dependencies): - """ - Ninja tool function for writing a depfile. The depfile should include - the node path followed by all the dependent files in a makefile format. - - dependencies arg can be a list or a subst generator which returns a list. - """ - - depfile = os.path.join(get_path(env['NINJA_BUILDDIR']), str(node) + '.depfile') - - # subst_list will take in either a raw list or a subst callable which generates - # a list, and return a list of CmdStringHolders which can be converted into raw strings. - # If a raw list was passed in, then scons_list will make a list of lists from the original - # values and even subst items in the list if they are substitutable. Flatten will flatten - # the list in that case, to ensure for either input we have a list of CmdStringHolders. 
- deps_list = env.Flatten(env.subst_list(dependencies)) - - # Now that we have the deps in a list as CmdStringHolders, we can convert them into raw strings - # and make sure to escape the strings to handle spaces in paths. We also will sort the result - # keep the order of the list consistent. - escaped_depends = sorted([dep.escape(env.get("ESCAPE", lambda x: x)) for dep in deps_list]) - depfile_contents = str(node) + ": " + ' '.join(escaped_depends) - - need_rewrite = False - try: - with open(depfile, 'r') as f: - need_rewrite = (f.read() != depfile_contents) - except FileNotFoundError: - need_rewrite = True - - if need_rewrite: - os.makedirs(os.path.dirname(depfile) or '.', exist_ok=True) - with open(depfile, 'w') as f: - f.write(depfile_contents) - -class SConsToNinjaTranslator: - """Translates SCons Actions into Ninja build objects.""" - - def __init__(self, env): - self.env = env - self.func_handlers = { - # Skip conftest builders - "_createSource": ninja_noop, - # SCons has a custom FunctionAction that just makes sure the - # target isn't static. We let the commands that ninja runs do - # this check for us. - "SharedFlagChecker": ninja_noop, - # The install builder is implemented as a function action. - "installFunc": _install_action_function, - "MkdirFunc": _mkdir_action_function, - "LibSymlinksActionFunction": _lib_symlink_action_function, - } - - self.loaded_custom = False - - # pylint: disable=too-many-return-statements - def action_to_ninja_build(self, node, action=None): - """Generate build arguments dictionary for node.""" - if not self.loaded_custom: - self.func_handlers.update(self.env[NINJA_CUSTOM_HANDLERS]) - self.loaded_custom = True - - if node.builder is None: - return None - - if action is None: - action = node.builder.action - - if node.env and node.env.get("NINJA_SKIP"): - return None - - build = {} - env = node.env if node.env else self.env - - # Ideally this should never happen, and we do try to filter - # Ninja builders out of being sources of ninja builders but I - # can't fix every DAG problem so we just skip ninja_builders - # if we find one - if node.builder == self.env["BUILDERS"]["Ninja"]: - build = None - elif isinstance(action, SCons.Action.FunctionAction): - build = self.handle_func_action(node, action) - elif isinstance(action, SCons.Action.LazyAction): - # pylint: disable=protected-access - action = action._generate_cache(env) - build = self.action_to_ninja_build(node, action=action) - elif isinstance(action, SCons.Action.ListAction): - build = self.handle_list_action(node, action) - elif isinstance(action, COMMAND_TYPES): - build = get_command(env, node, action) - else: - raise Exception("Got an unbuildable ListAction for: {}".format(str(node))) - - if build is not None: - build["order_only"] = get_order_only(node) - - if 'conftest' not in str(node): - node_callback = getattr(node.attributes, "ninja_build_callback", None) - if callable(node_callback): - node_callback(env, node, build) - - return build - - def handle_func_action(self, node, action): - """Determine how to handle the function action.""" - name = action.function_name() - # This is the name given by the Subst/Textfile builders. So return the - # node to indicate that SCons is required. We skip sources here because - # dependencies don't really matter when we're going to shove these to - # the bottom of ninja's DAG anyway and Textfile builders can have text - # content as their source which doesn't work as an implicit dep in - # ninja. 
We suppress errors on input Nodes types that we cannot handle - # since we expect that the re-invocation of SCons will handle dependency - # tracking for those Nodes and their dependents. - if name == "_action": - return { - "rule": "TEMPLATE", - "outputs": get_outputs(node), - "inputs": get_inputs(node, skip_unknown_types=True), - "implicit": get_dependencies(node, skip_sources=True), - } - - handler = self.func_handlers.get(name, None) - if handler is not None: - return handler(node.env if node.env else self.env, node) - - raise Exception( - "Found unhandled function action {}, " - " generating scons command to build\n" - "Note: this is less efficient than Ninja," - " you can write your own ninja build generator for" - " this function using NinjaRegisterFunctionHandler".format(name) - ) - - # pylint: disable=too-many-branches - def handle_list_action(self, node, action): - """TODO write this comment""" - results = [ - self.action_to_ninja_build(node, action=act) - for act in action.list - if act is not None - ] - results = [ - result for result in results if result is not None and result["outputs"] - ] - if not results: - return None - - # No need to process the results if we only got a single result - if len(results) == 1: - return results[0] - - all_outputs = list({output for build in results for output in build["outputs"]}) - dependencies = list({dep for build in results for dep in build["implicit"]}) - - if results[0]["rule"] == "CMD": - cmdline = "" - for cmd in results: - - # Occasionally a command line will expand to a - # whitespace only string (i.e. ' '). Which is not a - # valid command but does not trigger the empty command - # condition if not cmdstr. So here we strip preceding - # and proceeding whitespace to make strings like the - # above become empty strings and so will be skipped. - cmdstr = cmd["variables"]["cmd"].strip() - if not cmdstr: - continue - - # Skip duplicate commands - if cmdstr in cmdline: - continue - - if cmdline: - cmdline += " && " - - cmdline += cmdstr - - # Remove all preceding and proceeding whitespace - cmdline = cmdline.strip() - - # Make sure we didn't generate an empty cmdline - if cmdline: - ninja_build = { - "outputs": all_outputs, - "rule": "CMD", - "variables": { - "cmd": cmdline, - "env": get_command_env(node.env if node.env else self.env), - }, - "implicit": dependencies, - } - - if node.env and node.env.get("NINJA_POOL", None) is not None: - ninja_build["pool"] = node.env["pool"] - - return ninja_build - - elif results[0]["rule"] == "phony": - return { - "outputs": all_outputs, - "rule": "phony", - "implicit": dependencies, - } - - elif results[0]["rule"] == "INSTALL": - return { - "outputs": all_outputs, - "rule": "INSTALL", - "inputs": [get_path(src_file(s)) for s in node.sources], - "implicit": dependencies, - } - - raise Exception("Unhandled list action with rule: " + results[0]["rule"]) - - -# pylint: disable=too-many-instance-attributes -class NinjaState: - """Maintains state of Ninja build system as it's translated from SCons.""" - - def __init__(self, env, ninja_syntax): - self.env = env - self.writer_class = ninja_syntax.Writer - self.__generated = False - self.translator = SConsToNinjaTranslator(env) - self.generated_suffixes = env.get("NINJA_GENERATED_SOURCE_SUFFIXES", []) - - # List of generated builds that will be written at a later stage - self.builds = dict() - - # List of targets for which we have generated a build. 
This - # allows us to take multiple Alias nodes as sources and to not - # fail to build if they have overlapping targets. - self.built = set() - - # SCons sets this variable to a function which knows how to do - # shell quoting on whatever platform it's run on. Here we use it - # to make the SCONS_INVOCATION variable properly quoted for things - # like CCFLAGS - scons_escape = env.get("ESCAPE", lambda x: x) - - self.variables = { - "COPY": "cmd.exe /c 1>NUL copy" if sys.platform == "win32" else "cp", - "SCONS_INVOCATION": "{} {} __NINJA_NO=1 $out".format( - sys.executable, - " ".join( - [ninja_syntax.escape(scons_escape(arg)) for arg in sys.argv if arg not in COMMAND_LINE_TARGETS] - ), - ), - "SCONS_INVOCATION_W_TARGETS": "{} {}".format( - sys.executable, " ".join([ninja_syntax.escape(scons_escape(arg)) for arg in sys.argv]) - ), - # This must be set to a global default per: - # https://ninja-build.org/manual.html - # - # (The deps section) - "msvc_deps_prefix": "Note: including file:", - } - - self.rules = { - "CMD": { - "command": "cmd /c $env$cmd" if sys.platform == "win32" else "$env$cmd", - "description": "Building $out", - "pool": "local_pool", - }, - # We add the deps processing variables to this below. We - # don't pipe these through cmd.exe on Windows because we - # use this to generate a compile_commands.json database - # which can't use the shell command as it's compile - # command. - "CC": { - "command": "$env$CC @$out.rsp", - "description": "Compiling $out", - "rspfile": "$out.rsp", - "rspfile_content": "$rspc", - }, - "CXX": { - "command": "$env$CXX @$out.rsp", - "description": "Compiling $out", - "rspfile": "$out.rsp", - "rspfile_content": "$rspc", - }, - "LINK": { - "command": "$env$LINK @$out.rsp", - "description": "Linking $out", - "rspfile": "$out.rsp", - "rspfile_content": "$rspc", - "pool": "local_pool", - }, - # Ninja does not automatically delete the archive before - # invoking ar. The ar utility will append to an existing archive, which - # can cause duplicate symbols if the symbols moved between object files. - # Native SCons will perform this operation so we need to force ninja - # to do the same. See related for more info: - # https://jira.mongodb.org/browse/SERVER-49457 - "AR": { - "command": "{}$env$AR @$out.rsp".format( - '' if sys.platform == "win32" else "rm -f $out && " - ), - "description": "Archiving $out", - "rspfile": "$out.rsp", - "rspfile_content": "$rspc", - "pool": "local_pool", - }, - "SYMLINK": { - "command": ( - "cmd /c mklink $out $in" - if sys.platform == "win32" - else "ln -s $in $out" - ), - "description": "Symlink $in -> $out", - }, - "INSTALL": { - "command": "$COPY $in $out", - "description": "Install $out", - "pool": "install_pool", - # On Windows cmd.exe /c copy does not always correctly - # update the timestamp on the output file. This leads - # to a stuck constant timestamp in the Ninja database - # and needless rebuilds. - # - # Adding restat here ensures that Ninja always checks - # the copy updated the timestamp and that Ninja has - # the correct information. - "restat": 1, - }, - "TEMPLATE": { - "command": "$SCONS_INVOCATION $out", - "description": "Rendering $out", - "pool": "scons_pool", - "restat": 1, - }, - "SCONS": { - "command": "$SCONS_INVOCATION $out", - "description": "SCons $out", - "pool": "scons_pool", - # restat - # if present, causes Ninja to re-stat the command's outputs - # after execution of the command. 
Each output whose - # modification time the command did not change will be - # treated as though it had never needed to be built. This - # may cause the output's reverse dependencies to be removed - # from the list of pending build actions. - # - # We use restat any time we execute SCons because - # SCons calls in Ninja typically create multiple - # targets. But since SCons is doing it's own up to - # date-ness checks it may only update say one of - # them. Restat will find out which of the multiple - # build targets did actually change then only rebuild - # those targets which depend specifically on that - # output. - "restat": 1, - }, - "REGENERATE": { - "command": "$SCONS_INVOCATION_W_TARGETS", - "description": "Regenerating $out", - "generator": 1, - "depfile": os.path.join(get_path(env['NINJA_BUILDDIR']), '$out.depfile'), - # Console pool restricts to 1 job running at a time, - # it additionally has some special handling about - # passing stdin, stdout, etc to process in this pool - # that we need for SCons to behave correctly when - # regenerating Ninja - "pool": "console", - # Again we restat in case Ninja thought the - # build.ninja should be regenerated but SCons knew - # better. - "restat": 1, - }, - } - num_jobs = self.env.get('NINJA_MAX_JOBS', self.env.GetOption("num_jobs")) - self.pools = { - "local_pool": num_jobs, - "install_pool": num_jobs / 2, - "scons_pool": 1, - } - - for rule in ["CC", "CXX"]: - if env["PLATFORM"] == "win32": - self.rules[rule]["deps"] = "msvc" - else: - self.rules[rule]["deps"] = "gcc" - self.rules[rule]["depfile"] = "$out.d" - - def add_build(self, node): - if not node.has_builder(): - return False - - if isinstance(node, SCons.Node.Alias.Alias): - build = alias_to_ninja_build(node) - else: - build = self.translator.action_to_ninja_build(node) - - # Some things are unbuild-able or need not be built in Ninja - if build is None: - return False - - node_string = str(node) - if node_string in self.builds: - raise Exception("Node {} added to ninja build state more than once".format(node_string)) - self.builds[node_string] = build - self.built.update(build["outputs"]) - return True - - def is_generated_source(self, output): - """Check if output ends with a known generated suffix.""" - _, suffix = splitext(output) - return suffix in self.generated_suffixes - - def has_generated_sources(self, output): - """ - Determine if output indicates this is a generated header file. - """ - for generated in output: - if self.is_generated_source(generated): - return True - return False - - # pylint: disable=too-many-branches,too-many-locals - def generate(self, ninja_file): - """ - Generate the build.ninja. - - This should only be called once for the lifetime of this object. - """ - if self.__generated: - return - - self.rules.update(self.env.get(NINJA_RULES, {})) - self.pools.update(self.env.get(NINJA_POOLS, {})) - - content = io.StringIO() - ninja = self.writer_class(content, width=100) - - ninja.comment("Generated by scons. 
DO NOT EDIT.") - - ninja.variable("builddir", get_path(self.env['NINJA_BUILDDIR'])) - - for pool_name, size in self.pools.items(): - ninja.pool(pool_name, min(self.env.get('NINJA_MAX_JOBS', size), size)) - - for var, val in self.variables.items(): - ninja.variable(var, val) - - for rule, kwargs in self.rules.items(): - if self.env.get('NINJA_MAX_JOBS') is not None and 'pool' not in kwargs: - kwargs['pool'] = 'local_pool' - ninja.rule(rule, **kwargs) - - generated_source_files = sorted({ - output - # First find builds which have header files in their outputs. - for build in self.builds.values() - if self.has_generated_sources(build["outputs"]) - for output in build["outputs"] - # Collect only the header files from the builds with them - # in their output. We do this because is_generated_source - # returns True if it finds a header in any of the outputs, - # here we need to filter so we only have the headers and - # not the other outputs. - if self.is_generated_source(output) - }) - - if generated_source_files: - ninja.build( - outputs="_generated_sources", - rule="phony", - implicit=generated_source_files - ) - - template_builders = [] - - for build in [self.builds[key] for key in sorted(self.builds.keys())]: - if build["rule"] == "TEMPLATE": - template_builders.append(build) - continue - - if "implicit" in build: - build["implicit"].sort() - - # Don't make generated sources depend on each other. We - # have to check that none of the outputs are generated - # sources and none of the direct implicit dependencies are - # generated sources or else we will create a dependency - # cycle. - if ( - generated_source_files - and not build["rule"] == "INSTALL" - and set(build["outputs"]).isdisjoint(generated_source_files) - and set(build.get("implicit", [])).isdisjoint(generated_source_files) - ): - - # Make all non-generated source targets depend on - # _generated_sources. We use order_only for generated - # sources so that we don't rebuild the world if one - # generated source was rebuilt. We just need to make - # sure that all of these sources are generated before - # other builds. - order_only = build.get("order_only", []) - order_only.append("_generated_sources") - build["order_only"] = order_only - if "order_only" in build: - build["order_only"].sort() - - # When using a depfile Ninja can only have a single output - # but SCons will usually have emitted an output for every - # thing a command will create because it's caching is much - # more complex than Ninja's. This includes things like DWO - # files. Here we make sure that Ninja only ever sees one - # target when using a depfile. It will still have a command - # that will create all of the outputs but most targets don't - # depend direclty on DWO files and so this assumption is safe - # to make. - rule = self.rules.get(build["rule"]) - - # Some rules like 'phony' and other builtins we don't have - # listed in self.rules so verify that we got a result - # before trying to check if it has a deps key. - # - # Anything using deps or rspfile in Ninja can only have a single - # output, but we may have a build which actually produces - # multiple outputs which other targets can depend on. Here we - # slice up the outputs so we have a single output which we will - # use for the "real" builder and multiple phony targets that - # match the file names of the remaining outputs. This way any - # build can depend on any output from any build. - # - # We assume that the first listed output is the 'key' - # output and is stably presented to us by SCons. 
For - # instance if -gsplit-dwarf is in play and we are - # producing foo.o and foo.dwo, we expect that outputs[0] - # from SCons will be the foo.o file and not the dwo - # file. If instead we just sorted the whole outputs array, - # we would find that the dwo file becomes the - # first_output, and this breaks, for instance, header - # dependency scanning. - if rule is not None and (rule.get("deps") or rule.get("rspfile")): - first_output, remaining_outputs = ( - build["outputs"][0], - build["outputs"][1:], - ) - - if remaining_outputs: - ninja.build( - outputs=sorted(remaining_outputs), rule="phony", implicit=first_output, - ) - - build["outputs"] = first_output - - # Optionally a rule can specify a depfile, and SCons can generate implicit - # dependencies into the depfile. This allows for dependencies to come and go - # without invalidating the ninja file. The depfile was created in ninja specifically - # for dealing with header files appearing and disappearing across rebuilds, but it can - # be repurposed for anything, as long as you have a way to regenerate the depfile. - # More specific info can be found here: https://ninja-build.org/manual.html#_depfile - if rule is not None and rule.get('depfile') and build.get('deps_files'): - path = build['outputs'] if SCons.Util.is_List(build['outputs']) else [build['outputs']] - generate_depfile(self.env, path[0], build.pop('deps_files', [])) - - if "inputs" in build: - build["inputs"].sort() - - ninja.build(**build) - - template_builds = dict() - for template_builder in template_builders: - - # Special handling for outputs and implicit since we need to - # aggregate not replace for each builder. - for agg_key in ["outputs", "implicit", "inputs"]: - new_val = template_builds.get(agg_key, []) - - # Use pop so the key is removed and so the update - # below will not overwrite our aggregated values. - cur_val = template_builder.pop(agg_key, []) - if is_List(cur_val): - new_val += cur_val - else: - new_val.append(cur_val) - template_builds[agg_key] = new_val - - # Collect all other keys - template_builds.update(template_builder) - - if template_builds.get("outputs", []): - ninja.build(**template_builds) - - # We have to glob the SCons files here to teach the ninja file - # how to regenerate itself. We'll never see ourselves in the - # DAG walk so we can't rely on action_to_ninja_build to - # generate this rule even though SCons should know we're - # dependent on SCons files. - # - # The REGENERATE rule uses depfile, so we need to generate the depfile - # in case any of the SConscripts have changed. The depfile needs to be - # path with in the build and the passed ninja file is an abspath, so - # we will use SCons to give us the path within the build. Normally - # generate_depfile should not be called like this, but instead be called - # through the use of custom rules, and filtered out in the normal - # list of build generation about. However, because the generate rule - # is hardcoded here, we need to do this generate_depfile call manually. - ninja_file_path = self.env.File(ninja_file).path - generate_depfile( - self.env, - ninja_file_path, - self.env['NINJA_REGENERATE_DEPS'] - ) - - ninja.build( - ninja_file_path, - rule="REGENERATE", - implicit=[__file__], - ) - - # If we ever change the name/s of the rules that include - # compile commands (i.e. something like CC) we will need to - # update this build to reflect that complete list. 
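(Aside: the compile_commands.json produced by the build just below is plain JSON in the clang compilation-database format; a quick consumer sketch, assuming the file has already been generated:)

    import json

    with open("compile_commands.json") as f:
        entries = json.load(f)
    # Each entry carries "directory", "file", and "command" fields.
    print(len(entries), "entries;", entries[0]["file"] if entries else "(empty)")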
-        ninja.build(
-            "compile_commands.json",
-            rule="CMD",
-            pool="console",
-            implicit=[ninja_file],
-            variables={
-                "cmd": "ninja -f {} -t compdb {}CC CXX > compile_commands.json".format(
-                    ninja_file, '-x ' if self.env.get('NINJA_COMPDB_EXPAND') else ''
-                )
-            },
-        )
-
-        ninja.build(
-            "compiledb", rule="phony", implicit=["compile_commands.json"],
-        )
-
-        # Look in SCons's list of DEFAULT_TARGETS, find the ones that
-        # we generated a ninja build rule for.
-        scons_default_targets = [
-            get_path(tgt)
-            for tgt in SCons.Script.DEFAULT_TARGETS
-            if get_path(tgt) in self.built
-        ]
-
-        # If we found an overlap between SCons's list of default
-        # targets and the targets we created ninja builds for then use
-        # those as ninja's default as well.
-        if scons_default_targets:
-            ninja.default(" ".join(scons_default_targets))
-
-        with open(ninja_file, "w") as build_ninja:
-            build_ninja.write(content.getvalue())
-
-        self.__generated = True
-
-
-def get_path(node):
-    """
-    Return a fake path if necessary.
-
-    As an example Aliases use this as their target name in Ninja.
-    """
-    if hasattr(node, "get_path"):
-        return node.get_path()
-    return str(node)
-
-
-def rfile(node):
-    """
-    Return the repository file for node if it has one. Otherwise return node.
-    """
-    if hasattr(node, "rfile"):
-        return node.rfile()
-    return node
-
-
-def src_file(node):
-    """Returns the src code file if it exists."""
-    if hasattr(node, "srcnode"):
-        src = node.srcnode()
-        if src.stat() is not None:
-            return src
-    return get_path(node)
-
-
-def get_comstr(env, action, targets, sources):
-    """Get the un-substituted string for action."""
-    # Despite having "list" in its name this member is not
-    # actually a list. It's the pre-subst'd string of the command. We
-    # use it to determine if the command we're about to generate needs
-    # to use a custom Ninja rule. By default this redirects CC, CXX,
-    # AR, SHLINK, and LINK commands to their respective rules but the
-    # user can inject custom Ninja rules and tie them to commands by
-    # using their pre-subst'd string.
-    if hasattr(action, "process"):
-        return action.cmd_list
-
-    return action.genstring(targets, sources, env)
-
-
-def get_command_env(env):
-    """
-    Return a string that sets the environment for any environment variables that
-    differ between the OS environment and the SCons command ENV.
-
-    It will be compatible with the default shell of the operating system.
-    """
-    try:
-        return env["NINJA_ENV_VAR_CACHE"]
-    except KeyError:
-        pass
-
-    # Scan the ENV looking for any keys which do not exist in
-    # os.environ or differ from it. We assume if it's a new or
-    # differing key from the process environment then it's
-    # important to pass down to commands in the Ninja file.
- ENV = get_default_ENV(env) - scons_specified_env = { - key: value - for key, value in ENV.items() - if key not in os.environ or os.environ.get(key, None) != value - } - - windows = env["PLATFORM"] == "win32" - command_env = "" - for key, value in scons_specified_env.items(): - # Ensure that the ENV values are all strings: - if is_List(value): - # If the value is a list, then we assume it is a - # path list, because that's a pretty common list-like - # value to stick in an environment variable: - value = flatten_sequence(value) - value = joinpath(map(str, value)) - else: - # If it isn't a string or a list, then we just coerce - # it to a string, which is the proper way to handle - # Dir and File instances and will produce something - # reasonable for just about everything else: - value = str(value) - - if windows: - command_env += "set '{}={}' && ".format(key, value) - else: - # We address here *only* the specific case that a user might have - # an environment variable which somehow gets included and has - # spaces in the value. These are escapes that Ninja handles. This - # doesn't make builds on paths with spaces (Ninja and SCons issues) - # nor expanding response file paths with spaces (Ninja issue) work. - value = value.replace(r' ', r'$ ') - command_env += "export {}='{}';".format(key, value) - - env["NINJA_ENV_VAR_CACHE"] = command_env - return command_env - - -def gen_get_response_file_command(env, rule, tool, tool_is_dynamic=False, custom_env={}): - """Generate a response file command provider for rule name.""" - - # If win32 using the environment with a response file command will cause - # ninja to fail to create the response file. Additionally since these rules - # generally are not piping through cmd.exe /c any environment variables will - # make CreateProcess fail to start. - # - # On POSIX we can still set environment variables even for compile - # commands so we do so. - use_command_env = not env["PLATFORM"] == "win32" - if "$" in tool: - tool_is_dynamic = True - - def get_response_file_command(env, node, action, targets, sources, executor=None): - if hasattr(action, "process"): - cmd_list, _, _ = action.process(targets, sources, env, executor=executor) - cmd_list = [str(c).replace("$", "$$") for c in cmd_list[0]] - else: - command = generate_command( - env, node, action, targets, sources, executor=executor - ) - cmd_list = shlex.split(command) - - if tool_is_dynamic: - tool_command = env.subst( - tool, target=targets, source=sources, executor=executor - ) - else: - tool_command = tool - - try: - # Add 1 so we always keep the actual tool inside of cmd - tool_idx = cmd_list.index(tool_command) + 1 - except ValueError: - raise Exception( - "Could not find tool {} in {} generated from {}".format( - tool, cmd_list, get_comstr(env, action, targets, sources) - ) - ) - - cmd, rsp_content = cmd_list[:tool_idx], cmd_list[tool_idx:] - rsp_content = " ".join(rsp_content) - - variables = {"rspc": rsp_content} - variables[rule] = cmd - if use_command_env: - variables["env"] = get_command_env(env) - - for key, value in custom_env.items(): - variables["env"] += env.subst( - f"export {key}={value};", target=targets, source=sources, executor=executor - ) + " " - return rule, variables, [tool_command] - - return get_response_file_command - - -def generate_command(env, node, action, targets, sources, executor=None): - # Actions like CommandAction have a method called process that is - # used by SCons to generate the cmd_line they need to run. 
-    # So check if it's a thing like CommandAction and call it if we can.
-    if hasattr(action, "process"):
-        cmd_list, _, _ = action.process(targets, sources, env, executor=executor)
-        cmd = _string_from_cmd_list(cmd_list[0])
-    else:
-        # Anything else works with genstring, this is most commonly hit by
-        # ListActions which essentially call process on all of their
-        # commands and concatenate it for us.
-        genstring = action.genstring(targets, sources, env)
-        if executor is not None:
-            cmd = env.subst(genstring, executor=executor)
-        else:
-            cmd = env.subst(genstring, targets, sources)
-
-        cmd = cmd.replace("\n", " && ").strip()
-        if cmd.endswith("&&"):
-            cmd = cmd[0:-2].strip()
-
-    # Escape dollars as necessary
-    return cmd.replace("$", "$$")
-
-
-def get_generic_shell_command(env, node, action, targets, sources, executor=None):
-
-    if env.get('NINJA_TEMPLATE'):
-        rule = 'TEMPLATE'
-    else:
-        rule = 'CMD'
-
-    return (
-        rule,
-        {
-            "cmd": generate_command(env, node, action, targets, sources, executor=executor),
-            "env": get_command_env(env),
-        },
-        # Since this function is a rule mapping provider, it must return a list of dependencies,
-        # and usually this would be the path to a tool, such as a compiler, used for this rule.
-        # However this function is too generic to be able to reliably extract such deps
-        # from the command, so we return a placeholder empty list. It should be noted that
-        # generally this function will not be used solely and is more like a template to generate
-        # the basics for a custom provider which may have more specific options for a provider
-        # function for a custom NinjaRuleMapping.
-        []
-    )
-
-
-def get_command(env, node, action):  # pylint: disable=too-many-branches
-    """Get the command to execute for node."""
-    if node.env:
-        sub_env = node.env
-    else:
-        sub_env = env
-
-    executor = node.get_executor()
-    if executor is not None:
-        tlist = executor.get_all_targets()
-        slist = executor.get_all_sources()
-    else:
-        if hasattr(node, "target_peers"):
-            tlist = node.target_peers
-        else:
-            tlist = [node]
-        slist = node.sources
-
-    # Retrieve the repository file for all sources
-    slist = [rfile(s) for s in slist]
-
-    # Generate a real CommandAction
-    if isinstance(action, SCons.Action.CommandGeneratorAction):
-        # pylint: disable=protected-access
-        action = action._generate(tlist, slist, sub_env, 1, executor=executor)
-
-    variables = {}
-
-    comstr = get_comstr(sub_env, action, tlist, slist)
-    if not comstr:
-        return None
-
-    provider = __NINJA_RULE_MAPPING.get(comstr, get_generic_shell_command)
-    rule, variables, provider_deps = provider(sub_env, node, action, tlist, slist, executor=executor)
-
-    # Get the dependencies for all targets
-    implicit = list({dep for tgt in tlist for dep in get_dependencies(tgt)})
-
-    # Now add in the other dependencies related to the command,
-    # e.g. the compiler binary. The ninja rule can be user provided so
-    # we must do some validation to resolve the dependency path for ninja.
-    for provider_dep in provider_deps:
-
-        provider_dep = sub_env.subst(provider_dep)
-        if not provider_dep:
-            continue
-
-        # If the tool is a node, then SCons will resolve the path later; if it's
-        # not a node then we assume it was generated by the build and make sure
-        # it exists.
-        if isinstance(provider_dep, SCons.Node.Node) or os.path.exists(provider_dep):
-            implicit.append(provider_dep)
-            continue
-
-        # In some cases the tool could be in the local directory and be supplied
-        # without the extension, such as on Windows, so append the executable
-        # suffix and check.
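(Aside: the fallback chain in this loop, literal path first, then path plus executable suffix, then a PATH search, reduced to a standalone sketch; the helper name is made up.)

    import os
    import shutil

    def resolve_tool(tool, progsuffix=""):
        for candidate in (tool, tool + progsuffix):
            if os.path.exists(candidate):
                return candidate
        # Last resort: a PATH lookup, analogous to sub_env.WhereIs above.
        return shutil.which(tool)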
- prog_suffix = sub_env.get('PROGSUFFIX', '') - provider_dep_ext = provider_dep if provider_dep.endswith(prog_suffix) else provider_dep + prog_suffix - if os.path.exists(provider_dep_ext): - implicit.append(provider_dep_ext) - continue - - # Many commands will assume the binary is in the path, so - # we accept this as a possible input from a given command. - - provider_dep_abspath = sub_env.WhereIs(provider_dep) or sub_env.WhereIs(provider_dep, path=os.environ["PATH"]) - if provider_dep_abspath: - implicit.append(provider_dep_abspath) - continue - - # Possibly these could be ignore and the build would still work, however it may not always - # rebuild correctly, so we hard stop, and force the user to fix the issue with the provided - # ninja rule. - raise Exception(f"Could not resolve path for {provider_dep} dependency on node '{node}'") - - ninja_build = { - "order_only": get_order_only(node), - "outputs": get_outputs(node), - "inputs": get_inputs(node), - "implicit": implicit, - "rule": rule, - "variables": variables, - } - - # Don't use sub_env here because we require that NINJA_POOL be set - # on a per-builder call basis to prevent accidental strange - # behavior like env['NINJA_POOL'] = 'console' and sub_env can be - # the global Environment object if node.env is None. - # Example: - # - # Allowed: - # - # env.Command("ls", NINJA_POOL="ls_pool") - # - # Not allowed and ignored: - # - # env["NINJA_POOL"] = "ls_pool" - # env.Command("ls") - # - if node.env and node.env.get("NINJA_POOL", None) is not None: - ninja_build["pool"] = node.env["NINJA_POOL"] - - return ninja_build - - -def ninja_builder(env, target, source): - """Generate a build.ninja for source.""" - if not isinstance(source, list): - source = [source] - if not isinstance(target, list): - target = [target] - - # We have no COMSTR equivalent so print that we're generating - # here. 
- print("Generating:", str(target[0])) - - generated_build_ninja = target[0].get_abspath() - NINJA_STATE.generate(generated_build_ninja) - - return 0 - - -# pylint: disable=too-few-public-methods -class AlwaysExecAction(SCons.Action.FunctionAction): - """Override FunctionAction.__call__ to always execute.""" - - def __call__(self, *args, **kwargs): - kwargs["execute"] = 1 - return super().__call__(*args, **kwargs) - - -def register_custom_handler(env, name, handler): - """Register a custom handler for SCons function actions.""" - env[NINJA_CUSTOM_HANDLERS][name] = handler - - -def register_custom_rule_mapping(env, pre_subst_string, rule): - """Register a function to call for a given rule.""" - global __NINJA_RULE_MAPPING - __NINJA_RULE_MAPPING[pre_subst_string] = rule - - -def register_custom_rule(env, rule, command, description="", deps=None, pool=None, use_depfile=False, use_response_file=False, response_file_content="$rspc"): - """Allows specification of Ninja rules from inside SCons files.""" - rule_obj = { - "command": command, - "description": description if description else "{} $out".format(rule), - } - - if use_depfile: - rule_obj["depfile"] = os.path.join(get_path(env['NINJA_BUILDDIR']), '$out.depfile') - - if deps is not None: - rule_obj["deps"] = deps - - if pool is not None: - rule_obj["pool"] = pool - - if use_response_file: - rule_obj["rspfile"] = "$out.rsp" - rule_obj["rspfile_content"] = response_file_content - - env[NINJA_RULES][rule] = rule_obj - - -def register_custom_pool(env, pool, size): - """Allows the creation of custom Ninja pools""" - env[NINJA_POOLS][pool] = size - -def set_build_node_callback(env, node, callback): - if 'conftest' not in str(node): - setattr(node.attributes, "ninja_build_callback", callback) - -def ninja_csig(original): - """Return a dummy csig""" - - def wrapper(self): - name = str(self) - if "SConscript" in name or "SConstruct" in name: - return original(self) - return "dummy_ninja_csig" - - return wrapper - - -def ninja_contents(original): - """Return a dummy content without doing IO""" - - def wrapper(self): - name = str(self) - if "SConscript" in name or "SConstruct" in name: - return original(self) - return bytes("dummy_ninja_contents", encoding="utf-8") - - return wrapper - -def CheckNinjaCompdbExpand(env, context): - """ Configure check testing if ninja's compdb can expand response files""" - - context.Message('Checking if ninja compdb can expand response files... ') - ret, output = context.TryAction( - action='ninja -f $SOURCE -t compdb -x CMD_RSP > $TARGET', - extension='.ninja', - text=textwrap.dedent(""" - rule CMD_RSP - command = $cmd @$out.rsp > fake_output.txt - description = Building $out - rspfile = $out.rsp - rspfile_content = $rspc - build fake_output.txt: CMD_RSP fake_input.txt - cmd = echo - pool = console - rspc = "test" - """)) - result = '@fake_output.txt.rsp' not in output - context.Result(result) - return result - -def ninja_stat(_self, path): - """ - Eternally memoized stat call. - - SCons is very aggressive about clearing out cached values. For our - purposes everything should only ever call stat once since we're - running in a no_exec build the file system state should not - change. For these reasons we patch SCons.Node.FS.LocalFS.stat to - use our eternal memoized dictionary. 
- """ - global NINJA_STAT_MEMO - - try: - return NINJA_STAT_MEMO[path] - except KeyError: - try: - result = os.stat(path) - except os.error: - result = None - - NINJA_STAT_MEMO[path] = result - return result - - -def ninja_noop(*_args, **_kwargs): - """ - A general purpose no-op function. - - There are many things that happen in SCons that we don't need and - also don't return anything. We use this to disable those functions - instead of creating multiple definitions of the same thing. - """ - return None - - -def ninja_whereis(thing, *_args, **_kwargs): - """Replace env.WhereIs with a much faster version""" - global NINJA_WHEREIS_MEMO - - # Optimize for success, this gets called significantly more often - # when the value is already memoized than when it's not. - try: - return NINJA_WHEREIS_MEMO[thing] - except KeyError: - # We do not honor any env['ENV'] or env[*] variables in the - # generated ninja ile. Ninja passes your raw shell environment - # down to it's subprocess so the only sane option is to do the - # same during generation. At some point, if and when we try to - # upstream this, I'm sure a sticking point will be respecting - # env['ENV'] variables and such but it's actually quite - # complicated. I have a naive version but making it always work - # with shell quoting is nigh impossible. So I've decided to - # cross that bridge when it's absolutely required. - path = shutil.which(thing) - NINJA_WHEREIS_MEMO[thing] = path - return path - - -def ninja_always_serial(self, num, taskmaster): - """Replacement for SCons.Job.Jobs constructor which always uses the Serial Job class.""" - # We still set self.num_jobs to num even though it's a lie. The - # only consumer of this attribute is the Parallel Job class AND - # the Main.py function which instantiates a Jobs class. It checks - # if Jobs.num_jobs is equal to options.num_jobs, so if the user - # provides -j12 but we set self.num_jobs = 1 they get an incorrect - # warning about this version of Python not supporting parallel - # builds. So here we lie so the Main.py will not give a false - # warning to users. - self.num_jobs = num - self.job = SCons.Job.Serial(taskmaster) - - -def ninja_print_conf_log(s, target, source, env): - """Command line print only for conftest to generate a correct conf log.""" - if target and "conftest" in str(target[0]): - action = SCons.Action._ActionAction() - action.print_cmd_line(s, target, source, env) - - -class NinjaNoResponseFiles(SCons.Platform.TempFileMunge): - """Overwrite the __call__ method of SCons' TempFileMunge to not delete.""" - - def __call__(self, target, source, env, for_signature): - return self.cmd - - def _print_cmd_str(*_args, **_kwargs): - """Disable this method""" - pass - - -def exists(env): - """Enable if called.""" - - # This variable disables the tool when storing the SCons command in the - # generated ninja file to ensure that the ninja tool is not loaded when - # SCons should do actual work as a subprocess of a ninja build. The ninja - # tool is very invasive into the internals of SCons and so should never be - # enabled when SCons needs to build a target. - if env.get("__NINJA_NO", "0") == "1": - return False - - return True - - -def generate(env): - """Generate the NINJA builders.""" - env[NINJA_SYNTAX] = env.get(NINJA_SYNTAX, "ninja_syntax.py") - - # Add the Ninja builder. 
-    always_exec_ninja_action = AlwaysExecAction(ninja_builder, {})
-    ninja_builder_obj = SCons.Builder.Builder(action=always_exec_ninja_action)
-    env.Append(BUILDERS={"Ninja": ninja_builder_obj})
-
-    env["NINJA_PREFIX"] = env.get("NINJA_PREFIX", "build")
-    env["NINJA_SUFFIX"] = env.get("NINJA_SUFFIX", "ninja")
-    env["NINJA_ALIAS_NAME"] = env.get("NINJA_ALIAS_NAME", "generate-ninja")
-    env['NINJA_BUILDDIR'] = env.get("NINJA_BUILDDIR", env.Dir(".ninja").path)
-    ninja_file_name = env.subst("${NINJA_PREFIX}.${NINJA_SUFFIX}")
-    ninja_file = env.Ninja(target=ninja_file_name, source=[])
-    env.AlwaysBuild(ninja_file)
-    env.Alias("$NINJA_ALIAS_NAME", ninja_file)
-
-    # TODO: API for getting the SConscripts programmatically
-    # exists upstream: https://github.com/SCons/scons/issues/3625
-    def ninja_generate_deps(env):
-        return sorted([env.File("#SConstruct").path] + glob("**/SConscript", recursive=True))
-    env['_NINJA_REGENERATE_DEPS_FUNC'] = ninja_generate_deps
-
-    env['NINJA_REGENERATE_DEPS'] = env.get('NINJA_REGENERATE_DEPS', '${_NINJA_REGENERATE_DEPS_FUNC(__env__)}')
-
-    # This adds the required flags such that the generated compile
-    # commands will create depfiles as appropriate in the Ninja file.
-    if env["PLATFORM"] == "win32":
-        env.Append(CCFLAGS=["/showIncludes"])
-    else:
-        env.Append(CCFLAGS=["-MMD", "-MF", "${TARGET}.d"])
-
-    env.AddMethod(CheckNinjaCompdbExpand, "CheckNinjaCompdbExpand")
-
-    # Provide a way for custom rule authors to easily access command
-    # generation.
-    env.AddMethod(get_generic_shell_command, "NinjaGetGenericShellCommand")
-    env.AddMethod(get_command, "NinjaGetCommand")
-    env.AddMethod(gen_get_response_file_command, "NinjaGenResponseFileProvider")
-    env.AddMethod(set_build_node_callback, "NinjaSetBuildNodeCallback")
-
-    # Provides a way for users to handle custom FunctionActions they
-    # want to translate to Ninja.
-    env[NINJA_CUSTOM_HANDLERS] = {}
-    env.AddMethod(register_custom_handler, "NinjaRegisterFunctionHandler")
-
-    # Provides a mechanism for injecting custom Ninja rules which can
-    # then be mapped using NinjaRuleMapping.
-    env[NINJA_RULES] = {}
-    env.AddMethod(register_custom_rule, "NinjaRule")
-
-    # Provides a mechanism for injecting custom Ninja pools which can
-    # be used by providing NINJA_POOL="name" as an
-    # OverrideEnvironment variable in a builder call.
-    env[NINJA_POOLS] = {}
-    env.AddMethod(register_custom_pool, "NinjaPool")
-
-    # Add the ability to register custom NinjaRuleMappings for Command
-    # builders. We don't store this dictionary in the env to prevent
-    # accidental deletion of the CC/XXCOM mappings. You can still
-    # overwrite them if you really want to but you have to be explicit
-    # about it this way. The reason is that if they were accidentally
-    # deleted you would get a very subtly incorrect Ninja file and
-    # might not catch it.
-    env.AddMethod(register_custom_rule_mapping, "NinjaRuleMapping")
-
-    # TODO: change LINKCOM and SHLINKCOM to handle embedding manifest exe checks
-    # without relying on the SCons hacks that SCons uses by default.
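The NinjaRule and NinjaPool methods registered above are the intended extension points for SConscripts; a brief usage sketch, with the rule, pool, and command names purely illustrative:

env.NinjaPool("heavy_pool", 4)  # cap concurrency for expensive steps
env.NinjaRule(
    "SLOW_TOOL",
    command="$cmd",
    description="Slow tool $out",
    pool="heavy_pool",
)
# Per-call NINJA_POOL overrides route this build statement to the pool;
# a global env["NINJA_POOL"] assignment is deliberately ignored.
env.Command("out.txt", "in.txt", "slow_tool $SOURCE > $TARGET", NINJA_POOL="heavy_pool")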
- if env["PLATFORM"] == "win32": - from SCons.Tool.mslink import compositeLinkAction - - if env["LINKCOM"] == compositeLinkAction: - env[ - "LINKCOM" - ] = '${TEMPFILE("$LINK $LINKFLAGS /OUT:$TARGET.windows $_LIBDIRFLAGS $_LIBFLAGS $_PDB $SOURCES.windows", "$LINKCOMSTR")}' - env[ - "SHLINKCOM" - ] = '${TEMPFILE("$SHLINK $SHLINKFLAGS $_SHLINK_TARGETS $_LIBDIRFLAGS $_LIBFLAGS $_PDB $_SHLINK_SOURCES", "$SHLINKCOMSTR")}' - - # Normally in SCons actions for the Program and *Library builders - # will return "${*COM}" as their pre-subst'd command line. However - # if a user in a SConscript overwrites those values via key access - # like env["LINKCOM"] = "$( $ICERUN $)" + env["LINKCOM"] then - # those actions no longer return the "bracketted" string and - # instead return something that looks more expanded. So to - # continue working even if a user has done this we map both the - # "bracketted" and semi-expanded versions. - def robust_rule_mapping(var, rule, tool): - provider = gen_get_response_file_command(env, rule, tool) - env.NinjaRuleMapping("${" + var + "}", provider) - env.NinjaRuleMapping(env[var], provider) - - robust_rule_mapping("CCCOM", "CC", env["CC"]) - robust_rule_mapping("SHCCCOM", "CC", env["CC"]) - robust_rule_mapping("CXXCOM", "CXX", env["CXX"]) - robust_rule_mapping("SHCXXCOM", "CXX", env["CXX"]) - robust_rule_mapping("LINKCOM", "LINK", "$LINK") - robust_rule_mapping("SHLINKCOM", "LINK", "$SHLINK") - robust_rule_mapping("ARCOM", "AR", env["AR"]) - - # Make SCons node walk faster by preventing unnecessary work - env.Decider("timestamp-match") - - # Used to determine if a build generates a source file. Ninja - # requires that all generated sources are added as order_only - # dependencies to any builds that *might* use them. - env["NINJA_GENERATED_SOURCE_SUFFIXES"] = [".h", ".hpp"] - - if env["PLATFORM"] != "win32" and env.get("RANLIBCOM"): - # There is no way to translate the ranlib list action into - # Ninja so add the s flag and disable ranlib. - # - # This is equivalent to Meson. - # https://github.com/mesonbuild/meson/blob/master/mesonbuild/linkers.py#L143 - old_arflags = str(env["ARFLAGS"]) - if "s" not in old_arflags: - old_arflags += "s" - - env["ARFLAGS"] = SCons.Util.CLVar([old_arflags]) - - # Disable running ranlib, since we added 's' above - env["RANLIBCOM"] = "" - - # This is the point of no return, anything after this comment - # makes changes to SCons that are irreversible and incompatible - # with a normal SCons build. We return early if __NINJA_NO=1 has - # been given on the command line (i.e. by us in the generated - # ninja file) here to prevent these modifications from happening - # when we want SCons to do work. Everything before this was - # necessary to setup the builder and other functions so that the - # tool can be unconditionally used in the users's SCons files. - - if not exists(env): - return - - # Set a known variable that other tools can query so they can - # behave correctly during ninja generation. - env["GENERATING_NINJA"] = True - - # These methods are no-op'd because they do not work during ninja - # generation, expected to do no work, or simply fail. All of which - # are slow in SCons. So we overwrite them with no logic. 
-    SCons.Node.FS.File.make_ready = ninja_noop
-    SCons.Node.FS.File.prepare = ninja_noop
-    SCons.Node.FS.File.push_to_cache = ninja_noop
-    SCons.Executor.Executor.prepare = ninja_noop
-    SCons.Taskmaster.Task.prepare = ninja_noop
-    SCons.Node.FS.File.built = ninja_noop
-    SCons.Node.Node.visited = ninja_noop
-
-    # We make lstat a no-op because it is only used for SONAME
-    # symlinks which we're not producing.
-    SCons.Node.FS.LocalFS.lstat = ninja_noop
-
-    # This is a slow method that isn't memoized. We make it a noop
-    # since during our generation we will never use the results of
-    # this or change the results.
-    SCons.Node.FS.is_up_to_date = ninja_noop
-
-    # We overwrite stat and WhereIs with eternally memoized
-    # implementations. See the docstring of ninja_stat and
-    # ninja_whereis for detailed explanations.
-    SCons.Node.FS.LocalFS.stat = ninja_stat
-    SCons.Util.WhereIs = ninja_whereis
-
-    # Monkey patch get_csig and get_contents for some classes. They
-    # slow down the build significantly and we don't need contents or
-    # content signatures calculated when generating a ninja file since
-    # we're not doing any SCons caching or building.
-    SCons.Executor.Executor.get_contents = ninja_contents(
-        SCons.Executor.Executor.get_contents
-    )
-    SCons.Node.Alias.Alias.get_contents = ninja_contents(
-        SCons.Node.Alias.Alias.get_contents
-    )
-    SCons.Node.FS.File.get_contents = ninja_contents(SCons.Node.FS.File.get_contents)
-    SCons.Node.FS.File.get_csig = ninja_csig(SCons.Node.FS.File.get_csig)
-    SCons.Node.FS.Dir.get_csig = ninja_csig(SCons.Node.FS.Dir.get_csig)
-    SCons.Node.Alias.Alias.get_csig = ninja_csig(SCons.Node.Alias.Alias.get_csig)
-
-    # Ignore CHANGED_SOURCES and CHANGED_TARGETS. We don't want those
-    # to have effect in a generation pass because the generator
-    # shouldn't generate differently depending on the current local
-    # state. Without this, when generating on Windows, if you already
-    # had a foo.obj, you would omit foo.cpp from the response file. Do the same for UNCHANGED.
-    SCons.Executor.Executor._get_changed_sources = SCons.Executor.Executor._get_sources
-    SCons.Executor.Executor._get_changed_targets = SCons.Executor.Executor._get_targets
-    SCons.Executor.Executor._get_unchanged_sources = SCons.Executor.Executor._get_sources
-    SCons.Executor.Executor._get_unchanged_targets = SCons.Executor.Executor._get_targets
-
-    # Replace false action messages with nothing.
-    env["PRINT_CMD_LINE_FUNC"] = ninja_print_conf_log
-
-    # This reduces unnecessary subst_list calls to add the compiler to
-    # the implicit dependencies of targets. Since we encode full paths
-    # in our generated commands we do not need these slow subst calls
-    # as executing the command will fail if the file is not found
-    # where we expect it.
-    env["IMPLICIT_COMMAND_DEPENDENCIES"] = False
-
-    # This makes SCons more aggressively cache MD5 signatures in the
-    # SConsign file.
-    env.SetOption("max_drift", 1)
-
-    # The Serial job class is SIGNIFICANTLY (almost twice as) faster
-    # than the Parallel job class for generating Ninja files. So we
-    # monkey patch the Jobs constructor to only use the Serial Job class.
-    SCons.Job.Jobs.__init__ = ninja_always_serial
-
-    # The environment variable NINJA_SYNTAX points to the
-    # ninja_syntax.py module from the ninja sources found here:
-    # https://github.com/ninja-build/ninja/blob/master/misc/ninja_syntax.py
-    #
-    # This should be vendored into the build sources and its location
-    # set in NINJA_SYNTAX.
This code block loads the location from
-    # that variable, gets the absolute path to the vendored file, gets
-    # its parent directory, then uses importlib to import the module
-    # dynamically.
-    ninja_syntax_file = env[NINJA_SYNTAX]
-    if isinstance(ninja_syntax_file, str):
-        ninja_syntax_file = env.File(ninja_syntax_file).get_abspath()
-    ninja_syntax_mod_dir = os.path.dirname(ninja_syntax_file)
-    sys.path.append(ninja_syntax_mod_dir)
-    ninja_syntax_mod_name = os.path.basename(ninja_syntax_file)
-    ninja_syntax = importlib.import_module(ninja_syntax_mod_name.replace(".py", ""))
-
-    global NINJA_STATE
-    NINJA_STATE = NinjaState(env, ninja_syntax)
-
-    # Here we will force every builder to use an emitter which makes the ninja
-    # file depend on its target. This forces the ninja file to the bottom of
-    # the DAG, which is required so that we walk every target, and therefore add
-    # it to the global NINJA_STATE, before we try to write the ninja file.
-    def ninja_file_depends_on_all(target, source, env):
-        if not any("conftest" in str(t) for t in target):
-            env.Depends(ninja_file, target)
-        return target, source
-
-    # The "Alias Builder" isn't in the BUILDERS map so we have to
-    # modify it directly.
-    SCons.Environment.AliasBuilder.emitter = ninja_file_depends_on_all
-
-    for _, builder in env["BUILDERS"].items():
-        try:
-            emitter = builder.emitter
-            if emitter is not None:
-                builder.emitter = SCons.Builder.ListEmitter(
-                    [emitter, ninja_file_depends_on_all]
-                )
-            else:
-                builder.emitter = ninja_file_depends_on_all
-        # Users can inject whatever they want into the BUILDERS
-        # dictionary so if the thing doesn't have an emitter we'll
-        # just ignore it.
-        except AttributeError:
-            pass
-
-    # Here we monkey patch the Task.execute method to not do a bunch of
-    # unnecessary work. If a build is a regular builder (i.e. not a conftest and
-    # not our own Ninja builder) then we add it to the NINJA_STATE. Otherwise we
-    # build it like normal. This skips all of the caching work that this method
-    # would normally do since we aren't pulling any of these targets from the
-    # cache.
-    #
-    # In the future we may be able to use this to actually cache the build.ninja
-    # file once we have the upstream support for referencing SConscripts as File
-    # nodes.
-    def ninja_execute(self):
-        global NINJA_STATE
-
-        target = self.targets[0]
-        target_name = str(target)
-        if target_name != ninja_file_name and "conftest" not in target_name:
-            NINJA_STATE.add_build(target)
-        else:
-            target.build()
-
-    SCons.Taskmaster.Task.execute = ninja_execute
-
-    # Make needs_execute always return true instead of determining
-    # out-of-date-ness.
-    SCons.Script.Main.BuildTask.needs_execute = lambda x: True
-
-    # We will eventually need to overwrite TempFileMunge to make it
-    # handle persistent tempfiles or get an upstreamed change to add
-    # some configurability to its behavior with regard to tempfiles.
-    #
-    # Set all three environment variables that Python's
-    # tempfile.mkstemp looks at, as it behaves differently on different
-    # platforms and versions of Python.
- os.environ["TMPDIR"] = env.Dir("$BUILD_DIR/response_files").get_abspath() - os.environ["TEMP"] = os.environ["TMPDIR"] - os.environ["TMP"] = os.environ["TMPDIR"] - if not os.path.isdir(os.environ["TMPDIR"]): - env.Execute(SCons.Defaults.Mkdir(os.environ["TMPDIR"])) - - env["TEMPFILE"] = NinjaNoResponseFiles diff --git a/site_scons/site_tools/ninja.py b/site_scons/site_tools/ninja.py index 84281d3e9b5..8961764d08b 100644 --- a/site_scons/site_tools/ninja.py +++ b/site_scons/site_tools/ninja.py @@ -275,6 +275,7 @@ class SConsToNinjaTranslator: return None build = {} + env = node.env if node.env else self.env # Ideally this should never happen, and we do try to filter # Ninja builders out of being sources of ninja builders but I @@ -286,18 +287,23 @@ class SConsToNinjaTranslator: build = self.handle_func_action(node, action) elif isinstance(action, SCons.Action.LazyAction): # pylint: disable=protected-access - action = action._generate_cache(node.env if node.env else self.env) + action = action._generate_cache(env) build = self.action_to_ninja_build(node, action=action) elif isinstance(action, SCons.Action.ListAction): build = self.handle_list_action(node, action) elif isinstance(action, COMMAND_TYPES): - build = get_command(node.env if node.env else self.env, node, action) + build = get_command(env, node, action) else: raise Exception("Got an unbuildable ListAction for: {}".format(str(node))) if build is not None: build["order_only"] = get_order_only(node) + if 'conftest' not in str(node): + node_callback = getattr(node.attributes, "ninja_build_callback", None) + if callable(node_callback): + node_callback(env, node, build) + return build def handle_func_action(self, node, action): @@ -417,9 +423,9 @@ class SConsToNinjaTranslator: class NinjaState: """Maintains state of Ninja build system as it's translated from SCons.""" - def __init__(self, env, writer_class): + def __init__(self, env, ninja_syntax): self.env = env - self.writer_class = writer_class + self.writer_class = ninja_syntax.Writer self.__generated = False self.translator = SConsToNinjaTranslator(env) self.generated_suffixes = env.get("NINJA_GENERATED_SOURCE_SUFFIXES", []) @@ -436,18 +442,18 @@ class NinjaState: # shell quoting on whatever platform it's run on. 
Here we use it
         # to make the SCONS_INVOCATION variable properly quoted for things
         # like CCFLAGS
-        escape = env.get("ESCAPE", lambda x: x)
+        scons_escape = env.get("ESCAPE", lambda x: x)
 
         self.variables = {
             "COPY": "cmd.exe /c 1>NUL copy" if sys.platform == "win32" else "cp",
             "SCONS_INVOCATION": "{} {} __NINJA_NO=1 $out".format(
                 sys.executable,
                 " ".join(
-                    [escape(arg) for arg in sys.argv if arg not in COMMAND_LINE_TARGETS]
+                    [ninja_syntax.escape(scons_escape(arg)) for arg in sys.argv if arg not in COMMAND_LINE_TARGETS]
                 ),
             ),
             "SCONS_INVOCATION_W_TARGETS": "{} {}".format(
-                sys.executable, " ".join([escape(arg) for arg in sys.argv])
+                sys.executable, " ".join([ninja_syntax.escape(scons_escape(arg)) for arg in sys.argv])
             ),
             # This must be set to a global default per:
             # https://ninja-build.org/manual.html
@@ -568,10 +574,10 @@
                 "restat": 1,
             },
         }
-
+        num_jobs = self.env.get('NINJA_MAX_JOBS', self.env.GetOption("num_jobs"))
         self.pools = {
-            "local_pool": self.env.GetOption("num_jobs"),
-            "install_pool": self.env.GetOption("num_jobs") / 2,
+            "local_pool": num_jobs,
+            "install_pool": num_jobs / 2,
             "scons_pool": 1,
         }
@@ -637,12 +643,14 @@
         ninja.variable("builddir", get_path(self.env['NINJA_BUILDDIR']))
 
         for pool_name, size in self.pools.items():
-            ninja.pool(pool_name, size)
+            ninja.pool(pool_name, min(self.env.get('NINJA_MAX_JOBS', size), size))
 
         for var, val in self.variables.items():
             ninja.variable(var, val)
 
         for rule, kwargs in self.rules.items():
+            if self.env.get('NINJA_MAX_JOBS') is not None and 'pool' not in kwargs:
+                kwargs['pool'] = 'local_pool'
             ninja.rule(rule, **kwargs)
 
         generated_source_files = sorted({
@@ -942,13 +950,13 @@
         # doesn't make builds on paths with spaces (Ninja and SCons issues)
         # nor expanding response file paths with spaces (Ninja issue) work.
         value = value.replace(r' ', r'$ ')
-        command_env += "{}='{}' ".format(key, value)
+        command_env += "export {}='{}';".format(key, value)
 
     env["NINJA_ENV_VAR_CACHE"] = command_env
     return command_env
 
 
-def gen_get_response_file_command(env, rule, tool, tool_is_dynamic=False):
+def gen_get_response_file_command(env, rule, tool, tool_is_dynamic=False, custom_env={}):
     """Generate a response file command provider for rule name."""
 
     # If win32 using the environment with a response file command will cause
@@ -996,6 +1004,11 @@
         variables[rule] = cmd
         if use_command_env:
             variables["env"] = get_command_env(env)
+
+            for key, value in custom_env.items():
+                variables["env"] += env.subst(
+                    f"export {key}={value};", target=targets, source=sources, executor=executor
+                ) + " "
 
         return rule, variables, [tool_command]
 
     return get_response_file_command
@@ -1103,9 +1116,18 @@
             implicit.append(provider_dep)
             continue
 
+        # In some cases the tool could be in the local directory and be supplied without
+        # the extension, such as on Windows, so append the executable suffix and check.
+        prog_suffix = sub_env.get('PROGSUFFIX', '')
+        provider_dep_ext = provider_dep if provider_dep.endswith(prog_suffix) else provider_dep + prog_suffix
+        if os.path.exists(provider_dep_ext):
+            implicit.append(provider_dep_ext)
+            continue
+
         # Many commands will assume the binary is in the path, so
         # we accept this as a possible input from a given command.
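The custom_env parameter introduced above prepends extra exported variables to a rule's env variable; a usage sketch, in which the CCACHE_BASEDIR variable is purely illustrative and not part of this change:

cc_provider = env.NinjaGenResponseFileProvider(
    "CC", env["CC"], custom_env={"CCACHE_BASEDIR": "$BUILD_ROOT"})
# Map both forms of the command string, as robust_rule_mapping does.
env.NinjaRuleMapping("${CCCOM}", cc_provider)
env.NinjaRuleMapping(env["CCCOM"], cc_provider)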
-        provider_dep_abspath = sub_env.WhereIs(provider_dep)
+
+        provider_dep_abspath = sub_env.WhereIs(provider_dep) or sub_env.WhereIs(provider_dep, path=os.environ["PATH"])
         if provider_dep_abspath:
             implicit.append(provider_dep_abspath)
             continue
@@ -1182,7 +1204,7 @@ def register_custom_rule_mapping(env, pre_subst_string, rule):
     __NINJA_RULE_MAPPING[pre_subst_string] = rule
 
 
-def register_custom_rule(env, rule, command, description="", deps=None, pool=None, use_depfile=False):
+def register_custom_rule(env, rule, command, description="", deps=None, pool=None, use_depfile=False, use_response_file=False, response_file_content="$rspc"):
     """Allows specification of Ninja rules from inside SCons files."""
     rule_obj = {
         "command": command,
@@ -1198,6 +1220,10 @@ def register_custom_rule(env, rule, command, description="", deps=None, pool=Non
     if pool is not None:
         rule_obj["pool"] = pool
 
+    if use_response_file:
+        rule_obj["rspfile"] = "$out.rsp"
+        rule_obj["rspfile_content"] = response_file_content
+
     env[NINJA_RULES][rule] = rule_obj
 
 
@@ -1205,6 +1231,9 @@
     """Allows the creation of custom Ninja pools"""
     env[NINJA_POOLS][pool] = size
 
+def set_build_node_callback(env, node, callback):
+    if 'conftest' not in str(node):
+        setattr(node.attributes, "ninja_build_callback", callback)
 
 def ninja_csig(original):
     """Return a dummy csig"""
@@ -1323,6 +1352,13 @@
     self.job = SCons.Job.Serial(taskmaster)
 
 
+def ninja_print_conf_log(s, target, source, env):
+    """Command line print only for conftest to generate a correct conf log."""
+    if target and "conftest" in str(target[0]):
+        action = SCons.Action._ActionAction()
+        action.print_cmd_line(s, target, source, env)
+
+
 class NinjaNoResponseFiles(SCons.Platform.TempFileMunge):
     """Overwrite the __call__ method of SCons' TempFileMunge to not delete."""
@@ -1386,7 +1422,9 @@
     # Provide a way for custom rule authors to easily access command
     # generation.
     env.AddMethod(get_generic_shell_command, "NinjaGetGenericShellCommand")
+    env.AddMethod(get_command, "NinjaGetCommand")
     env.AddMethod(gen_get_response_file_command, "NinjaGenResponseFileProvider")
+    env.AddMethod(set_build_node_callback, "NinjaSetBuildNodeCallback")
 
     # Provides a way for users to handle custom FunctionActions they
     # want to translate to Ninja.
@@ -1538,7 +1576,7 @@
     SCons.Executor.Executor._get_unchanged_targets = SCons.Executor.Executor._get_targets
 
     # Replace false action messages with nothing.
-    env["PRINT_CMD_LINE_FUNC"] = ninja_noop
+    env["PRINT_CMD_LINE_FUNC"] = ninja_print_conf_log
 
     # This reduces unnecessary subst_list calls to add the compiler to
     # the implicit dependencies of targets. Since we encode full paths
@@ -1574,7 +1612,7 @@
     ninja_syntax = importlib.import_module(ninja_syntax_mod_name.replace(".py", ""))
 
     global NINJA_STATE
-    NINJA_STATE = NinjaState(env, ninja_syntax.Writer)
+    NINJA_STATE = NinjaState(env, ninja_syntax)
 
     # Here we will force every builder to use an emitter which makes the ninja
     # file depend on its target. This forces the ninja file to the bottom of
diff --git a/src/SConscript b/src/SConscript
index 0c3a05f8d81..b046e91173c 100644
--- a/src/SConscript
+++ b/src/SConscript
@@ -3,78 +3,19 @@
 # This is the principal SConscript file, invoked by the SConstruct. Its job is
 # to delegate to any and all per-module SConscript files.
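The hunk below drops the emitter-based shim_hack in favor of the declarative LIBDEPS_GLOBAL list. Both schemes honor a per-target opt-out through LIBDEPS_NO_INHERIT; a sketch of such an opt-out, assuming MongoDB's env.Library builder wrapper, with the target and source names invented for illustration:

env.Library(
    target='allocator_sensitive',
    source=['allocator_sensitive.cpp'],
    # Exclude this node from the globally injected allocator shim,
    # just as the removed shim_hack below honored LIBDEPS_NO_INHERIT.
    LIBDEPS_NO_INHERIT=[
        '$BUILD_DIR/third_party/shim_allocator',
    ],
)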
-from functools import partial
-
-from site_scons.mongo import insort_wrapper
-
 import SCons
 
 Import('env')
-Import('get_option')
-Import('has_option')
 Import('module_sconscripts')
-
-
-def shim_hack(target, source, env, inject_target=None, exclusions=None):
-    if exclusions is None:
-        exclusions = set(inject_target)
-    elif isinstance(exclusions, str):
-        exclusions = {exclusions, inject_target}
-    elif isinstance(exclusions, (list, set)):
-        exclusions = set(exclusions)
-        exclusions.add(inject_target)
-
-    # If we allowed conftests to become dependent, any TryLink
-    # that happened after we made the below modifications would
-    # cause the configure steps to try to compile tcmalloc and any
-    # of its dependencies. Oops!
-    if any('conftest' in str(t) for t in target):
-        return target, source
-
-    # It is possible that 'env' isn't a unique
-    # OverrideEnvironment, since if you didn't pass any kw args
-    # into your builder call, you just reuse the env you were
-    # called with. That could mean that we see the same
-    # environment here multiple times. But that is really OK,
-    # since the operation we are performing would be performed on
-    # all of them anyway.
-    libdeps_no_inherit = set(env.get('LIBDEPS_NO_INHERIT', []))
-    exclusions.update(libdeps_no_inherit)
-
-    if f"$BUILD_DIR/{inject_target}" not in exclusions:
-        lds = env.get('LIBDEPS', [])
-        shim_target = f"$BUILD_DIR/{inject_target}"
-        if shim_target not in lds:
-            insort_wrapper(lds, shim_target)
-            env['LIBDEPS'] = lds
-
-    return target, source
-
-
-def hack_builder_emitters(env, hack_method):
-    for builder_name in ('Program', 'SharedLibrary', 'LoadableModule', 'StaticLibrary'):
-        builder = env['BUILDERS'][builder_name]
-        base_emitter = builder.emitter
-        builder.emitter = SCons.Builder.ListEmitter([hack_method, base_emitter])
-
-
-if get_option("build-tools") == "next":
-    # Add any "global" dependencies here. This is where we make every build node
-    # depend on a list of other build nodes, such as an allocator or libunwind
-    # or libstdx or similar.
-    env.AppendUnique(
-        LIBDEPS_GLOBAL=[
-            '$BUILD_DIR/third_party/shim_allocator',
-        ],
-    )
-else:
-    hack_builder_emitters(
-        env,
-        partial(
-            shim_hack,
-            inject_target='third_party/shim_allocator',
-            exclusions='gperftools/gperftools'))
-
+# Add any "global" dependencies here. This is where we make every build node
+# depend on a list of other build nodes, such as an allocator or libunwind
+# or libstdx or similar.
+env.AppendUnique(
+    LIBDEPS_GLOBAL=[
+        '$BUILD_DIR/third_party/shim_allocator',
+    ],
+)
 
 # NOTE: We must do third_party first as it adds methods to the environment
 # that we need in the mongo sconscript
diff --git a/src/mongo/embedded/mongo_embedded/SConscript b/src/mongo/embedded/mongo_embedded/SConscript
index 283a5b3b396..5b9ab7ef339 100644
--- a/src/mongo/embedded/mongo_embedded/SConscript
+++ b/src/mongo/embedded/mongo_embedded/SConscript
@@ -1,13 +1,11 @@
 # -*- mode: python; -*-
 
+from functools import partial
+import libdeps
+
 Import("env")
 Import("get_option")
 
-if get_option('build-tools') == 'next':
-    import libdeps_next as libdeps
-else:
-    import libdeps
-
 env = env.Clone()
 
 env.AutoInstall(
@@ -34,9 +32,11 @@ if get_option('link-model') == 'static':
         ],
     )
 elif get_option('link-model') == 'dynamic-sdk':
-    mongoEmbeddedEnv['LIBDEPS_SHLIBEMITTER'] = libdeps.make_libdeps_emitter(
-        'SharedArchive',
-        libdeps.dependency_visibility_honored
+    # TODO(SERVER-59134): This fails to honor the libdeps-debug flag
+    mongoEmbeddedEnv['LIBDEPS_SHLIBEMITTER'] = partial(
+        libdeps.libdeps_emitter,
+        builder='SharedArchive',
+        visibility_map=libdeps.dependency_visibility_honored,
     )
 
 mongoEmbeddedEnv.AppendUnique(
diff --git a/src/mongo/embedded/stitch_support/SConscript b/src/mongo/embedded/stitch_support/SConscript
index c5ee25ebc6e..8ab94cb83c9 100644
--- a/src/mongo/embedded/stitch_support/SConscript
+++ b/src/mongo/embedded/stitch_support/SConscript
@@ -1,13 +1,11 @@
 # -*- mode: python; -*-
 
+from functools import partial
+import libdeps
+
 Import("env")
 Import("get_option")
 
-if get_option('build-tools') == 'next':
-    import libdeps_next as libdeps
-else:
-    import libdeps
-
 env = env.Clone()
 
 stitchSupportEnv = env.Clone()
@@ -24,9 +22,11 @@ if get_option('link-model') == 'static':
         ],
    )
 elif get_option('link-model') == 'dynamic-sdk':
-    stitchSupportEnv['LIBDEPS_SHLIBEMITTER'] = libdeps.make_libdeps_emitter(
-        'SharedArchive',
-        libdeps.dependency_visibility_honored
+    # TODO(SERVER-59134): This fails to honor the libdeps-debug flag
+    stitchSupportEnv['LIBDEPS_SHLIBEMITTER'] = partial(
+        libdeps.libdeps_emitter,
+        builder='SharedArchive',
+        visibility_map=libdeps.dependency_visibility_honored,
    )
 
 # Please see the note in ../mongo_embedded/SConscript about how to
diff --git a/src/shim_crt.cpp b/src/shim_crt.cpp
deleted file mode 100644
index c7fa170e3c3..00000000000
--- a/src/shim_crt.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright (C) 2021-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein.
If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-// This file intentionally blank. shim_crt.cpp rolls together the compiler
-// runtime libraries to provide a single source of those symbols to other
-// compiled objects in a dynamically linked build.
diff --git a/src/shim_cxx.cpp b/src/shim_cxx.cpp
deleted file mode 100644
index 38ea7b481f3..00000000000
--- a/src/shim_cxx.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright (C) 2021-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-// This file intentionally blank. shim_cxx.cpp rolls together the C++
-// runtime libraries to provide a single source of those symbols to other
-// compiled objects in a dynamically linked build.
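The NinjaSetBuildNodeCallback hook added by this change lets a SConscript adjust a single node's generated ninja build dict before it is written out; a closing sketch, with the target, action, and pool choice hypothetical:

def route_to_console_pool(env, node, ninja_build):
    # Matches the node_callback(env, node, build) invocation in
    # action_to_ninja_build; mutate the build dict in place.
    ninja_build["pool"] = "console"

version_header = env.Command("version.h", [], my_version_action)  # my_version_action is hypothetical
env.NinjaSetBuildNodeCallback(version_header[0], route_to_console_pool)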