path: root/tests/frontend/logging.py
# Pylint doesn't play well with fixtures and dependency injection from pytest
# pylint: disable=redefined-outer-name

import os
import re

import pytest

from buildstream.testing import create_repo

from buildstream import _yaml
from buildstream.exceptions import ErrorDomain
from buildstream.testing import cli  # pylint: disable=unused-import

# Directory containing the test project data
DATA_DIR = os.path.dirname(os.path.realpath(__file__))


@pytest.mark.datafiles(os.path.join(DATA_DIR, "project"))
def test_default_logging(cli, tmpdir, datafiles):
    project = str(datafiles)
    bin_files_path = os.path.join(project, "files", "bin-files")
    element_path = os.path.join(project, "elements")
    element_name = "fetch-test-git.bst"

    # Create our repo object of the given source type with
    # the bin files, and then collect the initial ref.
    #
    repo = create_repo("git", str(tmpdir))
    ref = repo.create(bin_files_path)

    # Write out our test target
    element = {"kind": "import", "sources": [repo.source_config(ref=ref)]}
    _yaml.roundtrip_dump(element, os.path.join(element_path, element_name))
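
    # The element written above serializes to YAML along these lines
    # (a sketch; the exact source fields come from the repo's
    # source_config() and are illustrative here):
    #
    #   kind: import
    #   sources:
    #   - kind: git
    #     url: file://...
    #     ref: <commit sha>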

    # Now try to fetch it
    result = cli.run(project=project, args=["source", "fetch", element_name])
    result.assert_success()

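    # The default message format yields a line of roughly this shape
    # (column widths and the action name are illustrative):
    #
    #   [00:00:00][        ][    fetch:fetch-test-git.bst          ] SUCCESS Checking sources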
    m = re.search(r"\[\d\d:\d\d:\d\d\]\[\s*\]\[.*\] SUCCESS Checking sources", result.stderr)
    assert m is not None


@pytest.mark.datafiles(os.path.join(DATA_DIR, "project"))
def test_custom_logging(cli, tmpdir, datafiles):
    project = str(datafiles)
    bin_files_path = os.path.join(project, "files", "bin-files")
    element_path = os.path.join(project, "elements")
    element_name = "fetch-test-git.bst"

    custom_log_format = "%{elapsed},%{elapsed-us},%{wallclock},%{wallclock-us},%{key},%{element},%{action},%{message}"
    user_config = {"logging": {"message-format": custom_log_format}}
    cli.configure(user_config)
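
    # This mirrors what a user could set in their buildstream.conf, along
    # these lines (abbreviated):
    #
    #   logging:
    #     message-format: "%{elapsed},%{elapsed-us},%{wallclock},..."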

    # Create our repo object of the given source type with
    # the bin files, and then collect the initial ref.
    #
    repo = create_repo("git", str(tmpdir))
    ref = repo.create(bin_files_path)

    # Write out our test target
    element = {"kind": "import", "sources": [repo.source_config(ref=ref)]}
    _yaml.roundtrip_dump(element, os.path.join(element_path, element_name))

    # Now try to fetch it
    result = cli.run(project=project, args=["source", "fetch", element_name])
    result.assert_success()

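    # The custom format should produce a comma separated line of roughly
    # this shape (the key and element values are illustrative):
    #
    #   00:00:00,00:00:00.000000,00:00:00,00:00:00.000000,<key>,<element>,SUCCESS,Checking sources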
    m = re.search(
        r"\d\d:\d\d:\d\d,\d\d:\d\d:\d\d\.\d{6},\d\d:\d\d:\d\d,\d\d:\d\d:\d\d\.\d{6}\s*,.*,SUCCESS,Checking sources",
        result.stderr,
    )
    assert m is not None


@pytest.mark.datafiles(os.path.join(DATA_DIR, "project"))
def test_failed_build_listing(cli, datafiles):
    project = str(datafiles)
    element_names = []
    for i in range(3):
        element_name = "testfail-{}.bst".format(i)
        element_path = os.path.join("elements", element_name)
        element = {"kind": "script", "config": {"commands": ["false"]}}
        _yaml.roundtrip_dump(element, os.path.join(project, element_path))
        element_names.append(element_name)
    result = cli.run(project=project, args=["--on-error=continue", "build", *element_names])
    result.assert_main_error(ErrorDomain.STREAM, None)

    # Check that we re-print the failure summaries only in the "Failure Summary"
    # section.
    # e.g.
    #
    # Failure Summary
    #   testfail-0.bst:
    #     [00:00:00][44f1b8c3][   build:testfail-0.bst                ] FAILURE Running 'commands'
    #
    failure_heading_pos = re.search(r"^Failure Summary$", result.stderr, re.MULTILINE).start()
    pipeline_heading_pos = re.search(r"^Pipeline Summary$", result.stderr, re.MULTILINE).start()
    failure_summary_range = range(failure_heading_pos, pipeline_heading_pos)
    matches = tuple(re.finditer(r"^\s+testfail-.\.bst:$", result.stderr, re.MULTILINE))
    for m in matches:
        assert m.start() in failure_summary_range
        assert m.end() in failure_summary_range
    assert len(matches) == 3  # each element should be matched once.

    # Note that if we mess up the 'element_name' of Messages, they won't be
    # printed with the name of the relevant element, e.g. 'testfail-1.bst'.
    # Check that they have the expected names.
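    #
    # e.g. we expect lines of roughly this shape (the key is illustrative):
    #
    #   [00:00:00] FAILURE [44f1b8c3] testfail-0.bst: Staged artifacts do not provide command 'sh'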
    pattern = r"\[..:..:..\] FAILURE \[.*\] testfail-.\.bst: Staged artifacts do not provide command 'sh'"
    assert len(re.findall(pattern, result.stderr, re.MULTILINE)) == 6  # each element should be matched twice.


# This test ensures that we get the expected element name and cache key in log lines.
#
#   * The master build log should show the element name and cache key
#     of the task element, i.e. the element currently being built, not
#     the element issuing the message.
#
#   * In the individual task log, we expect to see the name and cache
#     key of the element issuing each message. Since the entire log file
#     is contextual to the task, it makes more sense to provide the
#     full context of the issuing element there.
#
# The order and format of log lines are UI details and as such might
# change, in which case this test needs to be adapted. The important
# part of this test is only that we see task elements reported in the
# aggregated master log file, and message-originating elements in the
# task specific log file.
#
@pytest.mark.datafiles(os.path.join(DATA_DIR, "logging"))
def test_log_line_element_names(cli, datafiles):

    project = str(datafiles)

    # First discover the cache keys, this will give us a dictionary
    # where we can look up the brief cache key (as displayed in the logs)
    # by the element name.
    #
    keys = {}
    result = cli.run(project=project, args=["show", "--deps", "all", "--format", "%{name}||%{key}", "logtest.bst"])
    result.assert_success()
    lines = result.output.splitlines()
    for line in lines:
        name, key = line.split("||")
        keys[name] = key
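
    # At this point 'keys' maps element names to their displayed cache
    # keys, e.g. (the keys are illustrative):
    #
    #   {"logtest.bst": "10dc28c5", "foo.bst": "e5ab75a1", "bar.bst": "40ff1c5a"}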

    # Run a build of the import elements, so that we can observe only the build of the logtest.bst element
    # at the end.
    #
    result = cli.run(project=project, args=["build", "foo.bst", "bar.bst"])
    result.assert_success()

    # Now run the build
    #
    result = cli.run(project=project, args=["build", "logtest.bst"])
    result.assert_success()
    master_log = result.stderr

    # Now run `bst artifact log` to conveniently collect the build log so we can compare it.
    logfiles = os.path.join(project, "logfiles")
    logfile = os.path.join(logfiles, "logtest.log")
    result = cli.run(project=project, args=["artifact", "log", "--out", logfiles, "logtest.bst"])
    result.assert_success()

    with open(logfile, "r") as f:
        task_log = f.read()

    #########################################################
    #              Parse and assert master log              #
    #########################################################

    # In the master log, we're looking for lines like this:
    #
    # [--:--:--][10dc28c5][   build:logtest.bst                   ] STATUS  Staging bar.bst/40ff1c5a
    # [--:--:--][10dc28c5][   build:logtest.bst                   ] STATUS  Staging foo.bst/e5ab75a1

    # Capture (log key, element name, staged element name, staged element key)
    pattern = r"\[--:--:--\]\[(\S*)\]\[\s*build:(\S*)\s*] STATUS  Staging\s*(\S*)/(\S*)"
    lines = re.findall(pattern, master_log, re.MULTILINE)

    # We staged 2 elements
    assert len(lines) == 2

    # Assert that the logtest.bst element name and its cache key are used in the master log
    for line in lines:
        log_key, log_name, _staged_name, _staged_key = line

        assert log_name == "logtest.bst"
        assert log_key == keys["logtest.bst"]

    #########################################################
    #              Parse and assert artifact log            #
    #########################################################

    # In the task specific build log, we're looking for lines like this:
    #
    # [--:--:--] STATUS  [40ff1c5a] bar.bst: Staging bar.bst/40ff1c5a
    # [--:--:--] STATUS  [e5ab75a1] foo.bst: Staging foo.bst/e5ab75a1

    # Capture (log key, element name, staged element name, staged element key)
    pattern = r"\[--:--:--\] STATUS  \[(\S*)\] (\S*): Staging\s*(\S*)/(\S*)"
    lines = re.findall(pattern, task_log, re.MULTILINE)

    # We staged 2 elements
    assert len(lines) == 2

    # Assert that the originating element names and cache keys are used in
    # log lines when recorded to the task specific log file
    for line in lines:
        log_key, log_name, staged_name, staged_key = line

        assert log_name == staged_name
        assert log_key == staged_key
        assert log_key == keys[log_name]