author     Andi-Bogdan Postelnicu <bpostelnicu@mozilla.com>  2021-11-08 12:23:52 +0000
committer  Andi-Bogdan Postelnicu <bpostelnicu@mozilla.com>  2021-11-08 12:23:52 +0000
commit     5f81619260c3150dacf94d16bd02434e6e2c32b7 (patch)
tree       1510fb664eba67c0a3ee2bb63e1be8428b3ce606 /mach
parent     b454d094932c16fbd29e40d7344d2a357cc15640 (diff)
download   nss-hg-5f81619260c3150dacf94d16bd02434e6e2c32b7.tar.gz
Bug 1738600 - sunset Coverity from NSS. r=nss-reviewers,bbeurdouche
Differential Revision: https://phabricator.services.mozilla.com/D129982
Diffstat (limited to 'mach')
-rwxr-xr-x  mach  296
1 file changed, 0 insertions, 296 deletions
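
For reference, the removed 'static-analysis' subcommand was driven by the argparse setup deleted at the end of this diff. A rough usage sketch follows; the config path, storage path and source file below are illustrative placeholders, not taken from the patch:

  # Local run against a hand-written Coverity config file
  ./mach static-analysis --config coverity.yaml --storage /tmp/coverity lib/freebl/rsa.c

  # Automation run: omit --config; the NSS_AUTOMATION environment variable must be
  # set, and the analysis then covers the captured source for the whole tree
  ./mach static-analysis --force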
diff --git a/mach b/mach
index 7c45990f0..a3ef38dac 100755
--- a/mach
+++ b/mach
@@ -41,277 +41,6 @@ def run_tests(test, cycles="standard", env={}, silent=False):
subprocess.check_call(command, env=os_env, stdout=stdout, stderr=stderr)
-class coverityAction(argparse.Action):
-
- def get_coverity_remote_cfg(self):
- secret_name = 'project/relman/coverity-nss'
- secrets_url = 'http://taskcluster/secrets/v1/secret/{}'.format(secret_name)
-
- print('Using symbol upload token from the secrets service: "{}"'.
- format(secrets_url))
-
- import requests
- res = requests.get(secrets_url)
- res.raise_for_status()
- secret = res.json()
- cov_config = secret['secret'] if 'secret' in secret else None
-
- if cov_config is None:
- print('Ill formatted secret for Coverity. Aborting analysis.')
- return None
-
- return cov_config
-
- def get_coverity_local_cfg(self, path):
- try:
- import yaml
- file_handler = open(path)
- config = yaml.safe_load(file_handler)
- except Exception:
- print('Unable to load coverity config from {}'.format(path))
- return None
- return config
-
- def get_cov_config(self, path):
- cov_config = None
- if self.local_config:
- cov_config = self.get_coverity_local_cfg(path)
- else:
- cov_config = self.get_coverity_remote_cfg()
-
- if cov_config is None:
- print('Unable to load Coverity config.')
- return 1
-
- self.cov_analysis_url = cov_config.get('package_url')
- self.cov_package_name = cov_config.get('package_name')
- self.cov_url = cov_config.get('server_url')
- self.cov_port = cov_config.get('server_port')
- self.cov_auth = cov_config.get('auth_key')
- self.cov_package_ver = cov_config.get('package_ver')
- self.cov_full_stack = cov_config.get('full_stack', False)
-
- return 0
-
- def download_coverity(self):
- if self.cov_url is None or self.cov_port is None or self.cov_analysis_url is None or self.cov_auth is None:
- print('Missing Coverity config options!')
- return 1
-
- COVERITY_CONFIG = '''
- {
- "type": "Coverity configuration",
- "format_version": 1,
- "settings": {
- "server": {
- "host": "%s",
- "port": %s,
- "ssl" : true,
- "on_new_cert" : "trust",
- "auth_key_file": "%s"
- },
- "stream": "NSS",
- "cov_run_desktop": {
- "build_cmd": ["%s"],
- "clean_cmd": ["%s", "-cc"],
- }
- }
- }
- '''
- # Generate the coverity.conf and auth files
- build_cmd = os.path.join(cwd, 'build.sh')
- cov_auth_path = os.path.join(self.cov_state_path, 'auth')
- cov_setup_path = os.path.join(self.cov_state_path, 'coverity.conf')
- cov_conf = COVERITY_CONFIG % (self.cov_url, self.cov_port, cov_auth_path, build_cmd, build_cmd)
-
- def download(artifact_url, target):
- import requests
- resp = requests.get(artifact_url, verify=False, stream=True)
- resp.raise_for_status()
-
- # Extract archive into destination
- with tarfile.open(fileobj=io.BytesIO(resp.content)) as tar:
- tar.extractall(target)
-
- download(self.cov_analysis_url, self.cov_state_path)
-
- with open(cov_auth_path, 'w') as f:
- f.write(self.cov_auth)
-
- # Modify its permissions to 600
- os.chmod(cov_auth_path, 0o600)
-
- with open(cov_setup_path, 'a') as f:
- f.write(cov_conf)
-
- def setup_coverity(self, config_path, storage_path=None, force_download=True):
- rc = self.get_cov_config(config_path)
-
- if rc != 0:
- return rc
-
- if storage_path is None:
- # If storage_path is None, default the Coverity working directory to the cwd.
- storage_path = cwd
-
- self.cov_state_path = os.path.join(storage_path, "coverity")
-
- if force_download is True or not os.path.exists(self.cov_state_path):
- shutil.rmtree(self.cov_state_path, ignore_errors=True)
- os.mkdir(self.cov_state_path)
-
- # Download everything that we need for Coverity from our private instance
- self.download_coverity()
-
- self.cov_path = os.path.join(self.cov_state_path, self.cov_package_name)
- self.cov_run_desktop = os.path.join(self.cov_path, 'bin', 'cov-run-desktop')
- self.cov_translate = os.path.join(self.cov_path, 'bin', 'cov-translate')
- self.cov_configure = os.path.join(self.cov_path, 'bin', 'cov-configure')
- self.cov_work_path = os.path.join(self.cov_state_path, 'data-coverity')
- self.cov_idir_path = os.path.join(self.cov_work_path, self.cov_package_ver, 'idir')
-
- if not os.path.exists(self.cov_path) or \
- not os.path.exists(self.cov_run_desktop) or \
- not os.path.exists(self.cov_translate) or \
- not os.path.exists(self.cov_configure):
- print('Missing Coverity in {}'.format(self.cov_path))
- return 1
-
- return 0
-
- def run_process(self, args, cwd=cwd):
- proc = subprocess.Popen(args, cwd=cwd)
- status = None
- while status is None:
- try:
- status = proc.wait()
- except KeyboardInterrupt:
- pass
- return status
-
- def cov_is_file_in_source(self, abs_path):
- if os.path.islink(abs_path):
- abs_path = os.path.realpath(abs_path)
- return abs_path
-
- def dump_cov_artifact(self, cov_results, source, output):
- import json
-
- def relpath(path):
- '''Build path relative to repository root'''
- if path.startswith(cwd):
- return os.path.relpath(path, cwd)
- return path
-
- # Parse Coverity json into structured issues
- with open(cov_results) as f:
- result = json.load(f)
-
- # Parse the issues to a standard json format
- issues_dict = {'files': {}}
-
- files_list = issues_dict['files']
-
- def build_element(issue):
- # We look only for main event
- event_path = next((event for event in issue['events'] if event['main'] is True), None)
-
- dict_issue = {
- 'line': issue['mainEventLineNumber'],
- 'flag': issue['checkerName'],
- 'message': event_path['eventDescription'],
- 'extra': {
- 'category': issue['checkerProperties']['category'],
- 'stateOnServer': issue['stateOnServer'],
- 'stack': []
- }
- }
-
- # Embed all events into extra message
- for event in issue['events']:
- dict_issue['extra']['stack'].append({'file_path': relpath(event['strippedFilePathname']),
- 'line_number': event['lineNumber'],
- 'path_type': event['eventTag'],
- 'description': event['eventDescription']})
-
- return dict_issue
-
- for issue in result['issues']:
- path = self.cov_is_file_in_source(issue['strippedMainEventFilePathname'])
- if path is None:
- # Since we skip a result we should log it
- print('Skipping CID: {0} from file: {1} since it\'s not related to the current patch.'.format(
- issue['stateOnServer']['cid'], issue['strippedMainEventFilePathname']))
- continue
- # If path does not start with `cwd` skip it
- if not path.startswith(cwd):
- continue
- path = relpath(path)
- if path in files_list:
- files_list[path]['warnings'].append(build_element(issue))
- else:
- files_list[path] = {'warnings': [build_element(issue)]}
-
- with open(output, 'w') as f:
- json.dump(issues_dict, f)
-
- def mutate_paths(self, paths):
- for index in xrange(len(paths)):
- paths[index] = os.path.abspath(paths[index])
-
- def __call__(self, parser, args, paths, option_string=None):
- self.local_config = True
- config_path = args.config
- storage_path = args.storage
-
- have_paths = True
- if len(paths) == 0:
- have_paths = False
- print('No files have been specified for analysis, running Coverity on the entire project.')
-
- self.mutate_paths(paths)
-
- if config_path is None:
- self.local_config = False
- print('No coverity config path has been specified, so running in automation.')
- if 'NSS_AUTOMATION' not in os.environ:
- print('Coverity-based static-analysis cannot be run outside automation.')
- return 1
-
- rc = self.setup_coverity(config_path, storage_path, args.force)
- if rc != 0:
- return 1
-
- # First run cov-run-desktop --setup in order to set up the analysis env
- cmd = [self.cov_run_desktop, '--setup']
- print('Running {} --setup'.format(self.cov_run_desktop))
-
- rc = self.run_process(args=cmd, cwd=self.cov_path)
-
- if rc != 0:
- print('Running {} --setup failed!'.format(self.cov_run_desktop))
- return rc
-
- cov_result = os.path.join(self.cov_state_path, 'cov-results.json')
-
- # Once the capture is performed we need to do the actual Coverity Desktop analysis
- if have_paths:
- cmd = [self.cov_run_desktop, '--json-output-v6', cov_result] + paths
- else:
- cmd = [self.cov_run_desktop, '--json-output-v6', cov_result, '--analyze-captured-source']
-
- print('Running Coverity Analysis for {}'.format(cmd))
-
- rc = self.run_process(cmd, cwd=self.cov_state_path)
-
- if rc != 0:
- print('Coverity Analysis failed!')
-
- # On automation, like try, we want to build an artifact with the results.
- if 'NSS_AUTOMATION' in os.environ:
- self.dump_cov_artifact(cov_result, cov_result, "/home/worker/nss/coverity/coverity.json")
-
-
class cfAction(argparse.Action):
docker_command = None
restorecon = None
@@ -535,31 +264,6 @@ def parse_arguments():
help="Specify files or directories to run clang-format on",
action=cfAction)
- parser_sa = subparsers.add_parser(
- 'static-analysis',
- help="""
- Run static-analysis tools based on coverity.
-
- By default this runs only on automation and provides a list of issues that
- are only present locally.
- """)
- parser_sa.add_argument(
- '--config', help='Path to Coverity config file. Only used for local runs.',
- default=None)
- parser_sa.add_argument(
- '--storage', help="""
- Path where to store Coverity binaries and results. If none, the base repository will be used.
- """,
- default=None)
- parser_sa.add_argument(
- '--force', help='Force the re-download of the Coverity artifact.',
- action='store_true')
- parser_sa.add_argument(
- '<file>',
- nargs='*',
- help="Specify files to run Coverity on. If no files are specified the analysis will check the entire project.",
- action=coverityAction)
-
parser_test = subparsers.add_parser(
'tests', help='Run tests through tests/all.sh.')
tests = [