# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Coverage data for coverage.py.

This file once held the 4.x JSON data support, which is now gone.  It still
has storage-agnostic helpers, and is kept to avoid changing too many imports.
CoverageData is now defined in sqldata.py, and imported here to keep the
imports working.

"""

import glob
import os.path

from coverage.exceptions import CoverageException
from coverage.misc import file_be_gone
from coverage.sqldata import CoverageData


def line_counts(data, fullpath=False):
    """Return a dict summarizing the line coverage data.

    Keys are based on the file names, and values are the number of executed
    lines.  If `fullpath` is true, then the keys are the full pathnames of
    the files, otherwise they are the basenames of the files.

    Returns a dict mapping file names to counts of lines.

    """
    summ = {}
    if fullpath:
        filename_fn = lambda f: f
    else:
        filename_fn = os.path.basename
    for filename in data.measured_files():
        summ[filename_fn(filename)] = len(data.lines(filename))
    return summ
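
# A minimal usage sketch (illustrative, not part of the library): read a
# previously written data file and summarize it with `line_counts`.  The
# ".coverage" file name here is an assumption; any CoverageData file works.
#
#     data = CoverageData(".coverage")
#     data.read()
#     for name, count in line_counts(data, fullpath=True).items():
#         print(f"{name}: {count} lines executed")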


def add_data_to_hash(data, filename, hasher):
    """Contribute `filename`'s data to the `hasher`.

    `hasher` is a `coverage.misc.Hasher` instance to be updated with
    the file's data.  It should only get the results data, not the run
    data.

    """
    if data.has_arcs():
        hasher.update(sorted(data.arcs(filename) or []))
    else:
        hasher.update(sorted(data.lines(filename) or []))
    hasher.update(data.file_tracer(filename))
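
# A hedged sketch of driving `add_data_to_hash`: contribute each measured
# file's results to a `coverage.misc.Hasher`.  The ".coverage" data file
# name is an illustrative assumption.
#
#     from coverage.misc import Hasher
#
#     data = CoverageData(".coverage")
#     data.read()
#     hasher = Hasher()
#     for filename in data.measured_files():
#         add_data_to_hash(data, filename, hasher)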


def combine_parallel_data(
    data, aliases=None, data_paths=None, strict=False, keep=False, message=None,
):
    """Combine a number of data files together.

    Treat `data.filename` as a file prefix, and combine the data from all
    of the data files starting with that prefix plus a dot.

    If `aliases` is provided, it's a `PathAliases` object that is used to
    re-map paths to match the local machine's.

    If `data_paths` is provided, it is a list of directories or files to
    combine.  Directories are searched for files that start with
    `data.filename` plus dot as a prefix, and those files are combined.

    If `data_paths` is not provided, then the directory portion of
    `data.filename` is used as the directory to search for data files.

    Unless `keep` is true, every data file found and combined is then
    deleted from disk.  If a file cannot be read, a warning will be issued,
    and the file will not be deleted.

    If `strict` is true, and no files are found to combine, an error is
    raised.

    If `message` is given, it is a function that will be called with a
    human-readable message for each data file that is combined.

    """
    # Because of the os.path.abspath in the constructor, data_dir will
    # never be an empty string.
    data_dir, local = os.path.split(data.base_filename())
    localdot = local + '.*'

    data_paths = data_paths or [data_dir]
    files_to_combine = []
    for p in data_paths:
        if os.path.isfile(p):
            files_to_combine.append(os.path.abspath(p))
        elif os.path.isdir(p):
            pattern = os.path.join(os.path.abspath(p), localdot)
            files_to_combine.extend(glob.glob(pattern))
        else:
            raise CoverageException(f"Couldn't combine from non-existent path '{p}'")

    if strict and not files_to_combine:
        raise CoverageException("No data to combine")

    files_combined = 0
    for f in files_to_combine:
        if f == data.data_filename():
            # Sometimes we are combining into a file which is one of the
            # parallel files.  Skip that file.
            if data._debug.should('dataio'):
                data._debug.write(f"Skipping combining ourself: {f!r}")
            continue
        if data._debug.should('dataio'):
            data._debug.write(f"Combining data file {f!r}")
        try:
            new_data = CoverageData(f, debug=data._debug)
            new_data.read()
        except CoverageException as exc:
            if data._warn:
                # The CoverageException has the file name in it, so just
                # use the message as the warning.
                data._warn(str(exc))
        else:
            data.update(new_data, aliases=aliases)
            files_combined += 1
            if message:
                message(f"Combined data file {os.path.relpath(f)}")
            if not keep:
                if data._debug.should('dataio'):
                    data._debug.write(f"Deleting combined data file {f!r}")
                file_be_gone(f)

    if strict and not files_combined:
        raise CoverageException("No usable data files")
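
# A minimal sketch of combining parallel runs (file names are illustrative).
# Assuming parallel runs left ".coverage.<suffix>" files in the current
# directory, this merges them into a single ".coverage" file while keeping
# the originals on disk:
#
#     combined = CoverageData(".coverage")
#     combine_parallel_data(combined, keep=True)
#     combined.write()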