author     Bruce Dawson <randomascii@users.noreply.github.com>  2022-10-24 16:33:40 -0700
committer  GitHub <noreply@github.com>  2022-10-24 16:33:40 -0700
commit     35dc1cca70fbe747d9ec2601133154275213adb5 (patch)
tree       e0855fbf0ee4173aa353e5d2220a6e3308d284e2
parent     65c82f4b99f29db594d6c65fee87f659d0cf94c0 (diff)
parent     023d8a84311257a58b47191d229a58de8733c75b (diff)
download   ninja-35dc1cca70fbe747d9ec2601133154275213adb5.tar.gz
Merge branch 'master' into steady_clock_now_good
-rw-r--r--  .github/workflows/linux.yml            |  60
-rw-r--r--  .gitignore                             |   4
-rw-r--r--  CMakeLists.txt                         |  32
-rw-r--r--  CONTRIBUTING.md                        |   1
-rw-r--r--  RELEASING.md (renamed from RELEASING)  |  36
-rw-r--r--  [-rwxr-xr-x] configure.py              |  29
-rw-r--r--  doc/manual.asciidoc                    |  38
-rwxr-xr-x  misc/output_test.py                    |  18
-rw-r--r--  misc/zsh-completion                    |  37
-rw-r--r--  src/build.cc                           |  88
-rw-r--r--  src/build.h                            |   1
-rw-r--r--  src/build_log.cc                       |  10
-rw-r--r--  src/build_log.h                        |   2
-rw-r--r--  src/build_log_test.cc                  |   6
-rw-r--r--  src/build_test.cc                      | 320
-rw-r--r--  src/deps_log.cc                        |   2
-rw-r--r--  src/deps_log.h                         |   2
-rw-r--r--  src/deps_log_test.cc                   |  43
-rw-r--r--  src/disk_interface.cc                  |  13
-rw-r--r--  src/disk_interface_test.cc             |   2
-rw-r--r--  src/graph.cc                           |  75
-rw-r--r--  src/graph.h                            |   7
-rw-r--r--  src/graph_test.cc                      |  33
-rw-r--r--  src/hash_map.h                         |  44
-rw-r--r--  src/line_printer.cc                    |   8
-rw-r--r--  src/missing_deps.h                     |   7
-rw-r--r--  src/ninja.cc                           |  82
-rw-r--r--  src/parser.cc                          |   7
-rw-r--r--  src/status.h                           |   4
-rw-r--r--  src/util.cc                            | 191
-rw-r--r--  src/version.cc                         |   2
31 files changed, 981 insertions, 223 deletions
diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml
index 3c93e00..57a569e 100644
--- a/.github/workflows/linux.yml
+++ b/.github/workflows/linux.yml
@@ -147,3 +147,63 @@ jobs:
./ninja_test --gtest_filter=-SubprocessTest.SetWithLots
python3 misc/ninja_syntax_test.py
./misc/output_test.py
+
+ build-aarch64:
+ name: Build Linux ARM64
+ runs-on: [ubuntu-latest]
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Build
+ uses: uraimo/run-on-arch-action@v2
+ with:
+ arch: aarch64
+ distro: ubuntu18.04
+ githubToken: ${{ github.token }}
+ dockerRunArgs: |
+ --volume "${PWD}:/ninja"
+ install: |
+ apt-get update -q -y
+ apt-get install -q -y make gcc g++ libasan5 clang-tools curl p7zip-full file
+ run: |
+ set -x
+ cd /ninja
+
+ # INSTALL CMAKE
+ CMAKE_VERSION=3.23.4
+ curl -L -O https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-Linux-aarch64.sh
+ chmod +x cmake-${CMAKE_VERSION}-Linux-aarch64.sh
+ ./cmake-${CMAKE_VERSION}-Linux-aarch64.sh --skip-license --prefix=/usr/local
+
+ # BUILD
+ cmake -DCMAKE_BUILD_TYPE=Release -B release-build
+ cmake --build release-build --parallel --config Release
+ strip release-build/ninja
+ file release-build/ninja
+
+ # TEST
+ pushd release-build
+ ./ninja_test
+ popd
+
+ # CREATE ARCHIVE
+ mkdir artifact
+ 7z a artifact/ninja-linux-aarch64.zip ./release-build/ninja
+
+ # Upload ninja binary archive as an artifact
+ - name: Upload artifact
+ uses: actions/upload-artifact@v1
+ with:
+ name: ninja-binary-archives
+ path: artifact
+
+ - name: Upload release asset
+ if: github.event.action == 'published'
+ uses: actions/upload-release-asset@v1.0.1
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ upload_url: ${{ github.event.release.upload_url }}
+ asset_path: ./artifact/ninja-linux-aarch64.zip
+ asset_name: ninja-linux-aarch64.zip
+ asset_content_type: application/zip
diff --git a/.gitignore b/.gitignore
index fdca015..ca36ec8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -43,3 +43,7 @@
/.clangd/
/compile_commands.json
/.cache/
+
+# Visual Studio files
+/.vs/
+/out/
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 69ce1ab..76bfb62 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3,7 +3,9 @@ cmake_minimum_required(VERSION 3.15)
include(CheckSymbolExists)
include(CheckIPOSupported)
-project(ninja)
+option(NINJA_BUILD_BINARY "Build ninja binary" ON)
+
+project(ninja CXX)
# --- optional link-time optimization
check_ipo_supported(RESULT lto_supported OUTPUT error)
@@ -126,10 +128,18 @@ if(WIN32)
src/getopt.c
src/minidump-win32.cc
)
+ # Build getopt.c, which can be compiled as either C or C++, as C++
+ # so that build environments which lack a C compiler, but have a C++
+ # compiler may build ninja.
+ set_source_files_properties(src/getopt.c PROPERTIES LANGUAGE CXX)
else()
target_sources(libninja PRIVATE src/subprocess-posix.cc)
if(CMAKE_SYSTEM_NAME STREQUAL "OS400" OR CMAKE_SYSTEM_NAME STREQUAL "AIX")
target_sources(libninja PRIVATE src/getopt.c)
+ # Build getopt.c, which can be compiled as either C or C++, as C++
+ # so that build environments which lack a C compiler, but have a C++
+ # compiler may build ninja.
+ set_source_files_properties(src/getopt.c PROPERTIES LANGUAGE CXX)
endif()
# Needed for perfstat_cpu_total
@@ -152,11 +162,13 @@ if(CMAKE_SYSTEM_NAME STREQUAL "OS400" OR CMAKE_SYSTEM_NAME STREQUAL "AIX")
endif()
# Main executable is library plus main() function.
-add_executable(ninja src/ninja.cc)
-target_link_libraries(ninja PRIVATE libninja libninja-re2c)
+if(NINJA_BUILD_BINARY)
+ add_executable(ninja src/ninja.cc)
+ target_link_libraries(ninja PRIVATE libninja libninja-re2c)
-if(WIN32)
- target_sources(ninja PRIVATE windows/ninja.manifest)
+ if(WIN32)
+ target_sources(ninja PRIVATE windows/ninja.manifest)
+ endif()
endif()
# Adds browse mode into the ninja binary if it's supported by the host platform.
@@ -175,8 +187,10 @@ if(platform_supports_ninja_browse)
VERBATIM
)
- target_compile_definitions(ninja PRIVATE NINJA_HAVE_BROWSE)
- target_sources(ninja PRIVATE src/browse.cc)
+ if(NINJA_BUILD_BINARY)
+ target_compile_definitions(ninja PRIVATE NINJA_HAVE_BROWSE)
+ target_sources(ninja PRIVATE src/browse.cc)
+ endif()
set_source_files_properties(src/browse.cc
PROPERTIES
OBJECT_DEPENDS "${PROJECT_BINARY_DIR}/build/browse_py.h"
@@ -236,4 +250,6 @@ if(BUILD_TESTING)
add_test(NAME NinjaTest COMMAND ninja_test)
endif()
-install(TARGETS ninja DESTINATION bin)
+if(NINJA_BUILD_BINARY)
+ install(TARGETS ninja)
+endif()
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 69dac74..f003750 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -21,7 +21,6 @@ a few additions:
please try to avoid relying on it and instead whenever possible use `std::`.
However, please do not change existing code simply to add `std::` unless your
contribution already needs to change that line of code anyway.
-* All source files should have the Google Inc. license header.
* Use `///` for [Doxygen](http://www.doxygen.nl/) (use `\a` to refer to
arguments).
* It's not necessary to document each argument, especially when they're
diff --git a/RELEASING b/RELEASING.md
index 0b03341..4e3a4bd 100644
--- a/RELEASING
+++ b/RELEASING.md
@@ -1,33 +1,41 @@
Notes to myself on all the steps to make for a Ninja release.
-Push new release branch:
+### Push new release branch:
1. Run afl-fuzz for a day or so and run ninja_test
2. Consider sending a heads-up to the ninja-build mailing list first
3. Make sure branches 'master' and 'release' are synced up locally
4. Update src/version.cc with new version (with ".git"), then
- git commit -am 'mark this 1.5.0.git'
+ ```
+ git commit -am 'mark this 1.5.0.git'
+ ```
5. git checkout release; git merge master
6. Fix version number in src/version.cc (it will likely conflict in the above)
7. Fix version in doc/manual.asciidoc (exists only on release branch)
8. commit, tag, push (don't forget to push --tags)
- git commit -am v1.5.0; git push origin release
- git tag v1.5.0; git push --tags
- # Push the 1.5.0.git change on master too:
- git checkout master; git push origin master
+ ```
+ git commit -am v1.5.0; git push origin release
+ git tag v1.5.0; git push --tags
+ # Push the 1.5.0.git change on master too:
+ git checkout master; git push origin master
+ ```
9. Construct release notes from prior notes
- credits: git shortlog -s --no-merges REV..
-Release on github:
-1. https://github.com/blog/1547-release-your-software
- Add binaries to https://github.com/ninja-build/ninja/releases
+ credits: `git shortlog -s --no-merges REV..`
-Make announcement on mailing list:
+
+### Release on GitHub:
+1. Go to [Tags](https://github.com/ninja-build/ninja/tags)
+2. Open the newly created tag and select "Create release from tag"
+3. Create the release which will trigger a build which automatically attaches
+ the binaries
+
+### Make announcement on mailing list:
1. copy old mail
-Update website:
+### Update website:
1. Make sure your ninja checkout is on the v1.5.0 tag
2. Clone https://github.com/ninja-build/ninja-build.github.io
3. In that repo, `./update-docs.sh`
4. Update index.html with newest version and link to release notes
-5. git commit -m 'run update-docs.sh, 1.5.0 release'
-6. git push origin master
+5. `git commit -m 'run update-docs.sh, 1.5.0 release'`
+6. `git push origin master`
diff --git a/configure.py b/configure.py
index 99a2c86..09c5b28 100755..100644
--- a/configure.py
+++ b/configure.py
@@ -305,9 +305,18 @@ if platform.is_msvc():
else:
n.variable('ar', configure_env.get('AR', 'ar'))
+def search_system_path(file_name):
+ """Find a file in the system path."""
+ for dir in os.environ['path'].split(';'):
+ path = os.path.join(dir, file_name)
+ if os.path.exists(path):
+ return path
+
# Note that build settings are separately specified in CMakeLists.txt and
# these lists should be kept in sync.
if platform.is_msvc():
+ if not search_system_path('cl.exe'):
+ raise Exception('cl.exe not found. Run again from the Developer Command Prompt for VS')
cflags = ['/showIncludes',
'/nologo', # Don't print startup banner.
'/Zi', # Create pdb with debug info.
@@ -478,7 +487,7 @@ n.comment('the depfile parser and ninja lexers are generated using re2c.')
def has_re2c():
try:
proc = subprocess.Popen(['re2c', '-V'], stdout=subprocess.PIPE)
- return int(proc.communicate()[0], 10) >= 1103
+ return int(proc.communicate()[0], 10) >= 1503
except OSError:
return False
if has_re2c():
@@ -489,20 +498,31 @@ if has_re2c():
n.build(src('depfile_parser.cc'), 're2c', src('depfile_parser.in.cc'))
n.build(src('lexer.cc'), 're2c', src('lexer.in.cc'))
else:
- print("warning: A compatible version of re2c (>= 0.11.3) was not found; "
+ print("warning: A compatible version of re2c (>= 0.15.3) was not found; "
"changes to src/*.in.cc will not affect your build.")
n.newline()
-n.comment('Core source files all build into ninja library.')
cxxvariables = []
if platform.is_msvc():
cxxvariables = [('pdb', 'ninja.pdb')]
+
+n.comment('Generate a library for `ninja-re2c`.')
+re2c_objs = []
+for name in ['depfile_parser', 'lexer']:
+ re2c_objs += cxx(name, variables=cxxvariables)
+if platform.is_msvc():
+ n.build(built('ninja-re2c.lib'), 'ar', re2c_objs)
+else:
+ n.build(built('libninja-re2c.a'), 'ar', re2c_objs)
+n.newline()
+
+n.comment('Core source files all build into ninja library.')
+objs.extend(re2c_objs)
for name in ['build',
'build_log',
'clean',
'clparser',
'debug_flags',
- 'depfile_parser',
'deps_log',
'disk_interface',
'dyndep',
@@ -512,7 +532,6 @@ for name in ['build',
'graph',
'graphviz',
'json',
- 'lexer',
'line_printer',
'manifest_parser',
'metrics',
diff --git a/doc/manual.asciidoc b/doc/manual.asciidoc
index 9e2bec5..214dca4 100644
--- a/doc/manual.asciidoc
+++ b/doc/manual.asciidoc
@@ -257,8 +257,8 @@ than the _depth_ mode.
executed in order, may be used to rebuild those targets, assuming that all
output files are out of date.
-`inputs`:: given a list of targets, print a list of all inputs which are used
-to rebuild those targets.
+`inputs`:: given a list of targets, print a list of all inputs used to
+rebuild those targets.
_Available since Ninja 1.11._
`clean`:: remove built files. By default it removes all built files
@@ -312,6 +312,37 @@ file. _Available since Ninja 1.10._
to pass to +ninja -t targets rule _name_+ or +ninja -t compdb+. Adding the `-d`
flag also prints the description of the rules.
+`msvc`:: Available on Windows hosts only.
+Helper tool to invoke the `cl.exe` compiler with a pre-defined set of
+environment variables, as in:
++
+----
+ninja -t msvc -e ENVFILE -- cl.exe <arguments>
+----
++
+Where `ENVFILE` is a binary file that contains an environment block suitable
+for CreateProcessA() on Windows (i.e. a series of zero-terminated strings that
+look like NAME=VALUE, followed by an extra zero terminator). Note that this uses
+the local codepage encoding.
+
+This tool also supports a deprecated way of parsing the compiler's output when
+the `/showIncludes` flag is used, and generating a GCC-compatible depfile from it.
++
+----
+ninja -t msvc -o DEPFILE [-p STRING] -- cl.exe /showIncludes <arguments>
+----
++
+
+When using this option, `-p STRING` can be used to pass the localized line prefix
+that `cl.exe` uses to output dependency information. For English-speaking regions
+this is `"Note: including file: "` without the double quotes, but will be different
+for other regions.
+
+Note that Ninja supports this natively now, with the use of `deps = msvc` and
+`msvc_deps_prefix` in Ninja files. Native support also avoids launching an extra
+tool process each time the compiler must be called, which can speed up builds
+noticeably on Windows.
+
`wincodepage`:: Available on Windows hosts (_since Ninja 1.11_).
Prints the Windows code page whose encoding is expected in the build file.
The output has the form:
@@ -1015,6 +1046,9 @@ relative path, pointing to the same file, are considered different by Ninja.
[[validations]]
Validations
~~~~~~~~~~~
+
+_Available since Ninja 1.11._
+
Validations listed on the build line cause the specified files to be
added to the top level of the build graph (as if they were specified
on the Ninja command line) whenever the build line is a transitive
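
The ENVFILE format described for the `-t msvc` tool added above is the standard Win32 environment block: a sequence of NAME=VALUE strings, each terminated by a NUL byte, with one extra NUL closing the block. A minimal sketch of writing such a file follows; it is portable C++ that only builds the block (no CreateProcessA call), and the variable contents are purely illustrative.

```
#include <fstream>
#include <string>
#include <vector>

// Build a CreateProcessA-style environment block: each "NAME=VALUE" entry is
// followed by a '\0', and the whole block ends with one extra '\0'.
std::string MakeEnvBlock(const std::vector<std::string>& vars) {
  std::string block;
  for (const std::string& v : vars) {
    block += v;
    block += '\0';
  }
  block += '\0';  // final terminator
  return block;
}

int main() {
  // Illustrative contents only; a real ENVFILE would carry the full
  // VS developer environment (INCLUDE, LIB, PATH, ...).
  std::string block =
      MakeEnvBlock({"INCLUDE=C:\\vs\\include", "LIB=C:\\vs\\lib"});
  std::ofstream out("cl_env.bin", std::ios::binary);
  out.write(block.data(), static_cast<std::streamsize>(block.size()));
  return 0;
}
```

Per the documentation above, `ninja -t msvc -e cl_env.bin -- cl.exe <arguments>` would then run the compiler under that environment.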
diff --git a/misc/output_test.py b/misc/output_test.py
index 45698f1..141716c 100755
--- a/misc/output_test.py
+++ b/misc/output_test.py
@@ -134,5 +134,23 @@ red
output = run(Output.BUILD_SIMPLE_ECHO, flags='-C$PWD', pipe=True)
self.assertEqual(output.splitlines()[0][:25], "ninja: Entering directory")
+ def test_tool_inputs(self):
+ plan = '''
+rule cat
+ command = cat $in $out
+build out1 : cat in1
+build out2 : cat in2 out1
+build out3 : cat out2 out1 | implicit || order_only
+'''
+ self.assertEqual(run(plan, flags='-t inputs out3'),
+'''implicit
+in1
+in2
+order_only
+out1
+out2
+''')
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/misc/zsh-completion b/misc/zsh-completion
index 4cee3b8..d439df3 100644
--- a/misc/zsh-completion
+++ b/misc/zsh-completion
@@ -16,7 +16,7 @@
# Add the following to your .zshrc to tab-complete ninja targets
# fpath=(path/to/ninja/misc/zsh-completion $fpath)
-__get_targets() {
+(( $+functions[_ninja-get-targets] )) || _ninja-get-targets() {
dir="."
if [ -n "${opt_args[-C]}" ];
then
@@ -31,42 +31,45 @@ __get_targets() {
eval ${targets_command} 2>/dev/null | cut -d: -f1
}
-__get_tools() {
- ninja -t list 2>/dev/null | while read -r a b; do echo $a; done | tail -n +2
+(( $+functions[_ninja-get-tools] )) || _ninja-get-tools() {
+ # remove the first line; remove the leading spaces; replace spaces with colon
+ ninja -t list 2> /dev/null | sed -e '1d;s/^ *//;s/ \+/:/'
}
-__get_modes() {
- ninja -d list 2>/dev/null | while read -r a b; do echo $a; done | tail -n +2 | sed '$d'
+(( $+functions[_ninja-get-modes] )) || _ninja-get-modes() {
+ # remove the first line; remove the last line; remove the leading spaces; replace spaces with colon
+ ninja -d list 2> /dev/null | sed -e '1d;$d;s/^ *//;s/ \+/:/'
}
-__modes() {
+(( $+functions[_ninja-modes] )) || _ninja-modes() {
local -a modes
- modes=(${(fo)"$(__get_modes)"})
+ modes=(${(fo)"$(_ninja-get-modes)"})
_describe 'modes' modes
}
-__tools() {
+(( $+functions[_ninja-tools] )) || _ninja-tools() {
local -a tools
- tools=(${(fo)"$(__get_tools)"})
+ tools=(${(fo)"$(_ninja-get-tools)"})
_describe 'tools' tools
}
-__targets() {
+(( $+functions[_ninja-targets] )) || _ninja-targets() {
local -a targets
- targets=(${(fo)"$(__get_targets)"})
+ targets=(${(fo)"$(_ninja-get-targets)"})
_describe 'targets' targets
}
_arguments \
- {-h,--help}'[Show help]' \
- '--version[Print ninja version]' \
+ '(- *)'{-h,--help}'[Show help]' \
+ '(- *)--version[Print ninja version]' \
'-C+[Change to directory before doing anything else]:directories:_directories' \
'-f+[Specify input build file (default=build.ninja)]:files:_files' \
'-j+[Run N jobs in parallel (default=number of CPUs available)]:number of jobs' \
'-l+[Do not start new jobs if the load average is greater than N]:number of jobs' \
'-k+[Keep going until N jobs fail (default=1)]:number of jobs' \
'-n[Dry run (do not run commands but act like they succeeded)]' \
- '-v[Show all command lines while building]' \
- '-d+[Enable debugging (use -d list to list modes)]:modes:__modes' \
- '-t+[Run a subtool (use -t list to list subtools)]:tools:__tools' \
- '*::targets:__targets'
+ '(-v --verbose --quiet)'{-v,--verbose}'[Show all command lines while building]' \
+ "(-v --verbose --quiet)--quiet[Don't show progress status, just command output]" \
+ '-d+[Enable debugging (use -d list to list modes)]:modes:_ninja-modes' \
+ '-t+[Run a subtool (use -t list to list subtools)]:tools:_ninja-tools' \
+ '*::targets:_ninja-targets'
diff --git a/src/build.cc b/src/build.cc
index 6f11ed7..76ff93a 100644
--- a/src/build.cc
+++ b/src/build.cc
@@ -518,6 +518,10 @@ Builder::Builder(State* state, const BuildConfig& config,
start_time_millis_(start_time_millis), disk_interface_(disk_interface),
scan_(state, build_log, deps_log, disk_interface,
&config_.depfile_parser_options) {
+ lock_file_path_ = ".ninja_lock";
+ string build_dir = state_->bindings_.LookupVariable("builddir");
+ if (!build_dir.empty())
+ lock_file_path_ = build_dir + "/" + lock_file_path_;
}
Builder::~Builder() {
@@ -552,6 +556,10 @@ void Builder::Cleanup() {
disk_interface_->RemoveFile(depfile);
}
}
+
+ string err;
+ if (disk_interface_->Stat(lock_file_path_, &err) > 0)
+ disk_interface_->RemoveFile(lock_file_path_);
}
Node* Builder::AddTarget(const string& name, string* err) {
@@ -704,14 +712,25 @@ bool Builder::StartEdge(Edge* edge, string* err) {
status_->BuildEdgeStarted(edge, start_time_millis);
- // Create directories necessary for outputs.
+ TimeStamp build_start = -1;
+
+ // Create directories necessary for outputs and remember the current
+ // filesystem mtime to record later
// XXX: this will block; do we care?
for (vector<Node*>::iterator o = edge->outputs_.begin();
o != edge->outputs_.end(); ++o) {
if (!disk_interface_->MakeDirs((*o)->path()))
return false;
+ if (build_start == -1) {
+ disk_interface_->WriteFile(lock_file_path_, "");
+ build_start = disk_interface_->Stat(lock_file_path_, err);
+ if (build_start == -1)
+ build_start = 0;
+ }
}
+ edge->command_start_time_ = build_start;
+
// Create response file, if needed
// XXX: this may also block; do we care?
string rspfile = edge->GetUnescapedRspfile();
@@ -770,55 +789,42 @@ bool Builder::FinishCommand(CommandRunner::Result* result, string* err) {
}
// Restat the edge outputs
- TimeStamp output_mtime = 0;
- bool restat = edge->GetBindingBool("restat");
+ TimeStamp record_mtime = 0;
if (!config_.dry_run) {
+ const bool restat = edge->GetBindingBool("restat");
+ const bool generator = edge->GetBindingBool("generator");
bool node_cleaned = false;
-
- for (vector<Node*>::iterator o = edge->outputs_.begin();
- o != edge->outputs_.end(); ++o) {
- TimeStamp new_mtime = disk_interface_->Stat((*o)->path(), err);
- if (new_mtime == -1)
- return false;
- if (new_mtime > output_mtime)
- output_mtime = new_mtime;
- if ((*o)->mtime() == new_mtime && restat) {
- // The rule command did not change the output. Propagate the clean
- // state through the build graph.
- // Note that this also applies to nonexistent outputs (mtime == 0).
- if (!plan_.CleanNode(&scan_, *o, err))
+ record_mtime = edge->command_start_time_;
+
+ // restat and generator rules must restat the outputs after the build
+ // has finished. If record_mtime == 0, then there was an error while
+ // attempting to touch/stat the temp file when the edge started and
+ // we should fall back to recording the outputs' current mtime in the
+ // log.
+ if (record_mtime == 0 || restat || generator) {
+ for (vector<Node*>::iterator o = edge->outputs_.begin();
+ o != edge->outputs_.end(); ++o) {
+ TimeStamp new_mtime = disk_interface_->Stat((*o)->path(), err);
+ if (new_mtime == -1)
return false;
- node_cleaned = true;
+ if (new_mtime > record_mtime)
+ record_mtime = new_mtime;
+ if ((*o)->mtime() == new_mtime && restat) {
+ // The rule command did not change the output. Propagate the clean
+ // state through the build graph.
+ // Note that this also applies to nonexistent outputs (mtime == 0).
+ if (!plan_.CleanNode(&scan_, *o, err))
+ return false;
+ node_cleaned = true;
+ }
}
}
-
if (node_cleaned) {
- TimeStamp restat_mtime = 0;
- // If any output was cleaned, find the most recent mtime of any
- // (existing) non-order-only input or the depfile.
- for (vector<Node*>::iterator i = edge->inputs_.begin();
- i != edge->inputs_.end() - edge->order_only_deps_; ++i) {
- TimeStamp input_mtime = disk_interface_->Stat((*i)->path(), err);
- if (input_mtime == -1)
- return false;
- if (input_mtime > restat_mtime)
- restat_mtime = input_mtime;
- }
-
- string depfile = edge->GetUnescapedDepfile();
- if (restat_mtime != 0 && deps_type.empty() && !depfile.empty()) {
- TimeStamp depfile_mtime = disk_interface_->Stat(depfile, err);
- if (depfile_mtime == -1)
- return false;
- if (depfile_mtime > restat_mtime)
- restat_mtime = depfile_mtime;
- }
+ record_mtime = edge->command_start_time_;
// The total number of edges in the plan may have changed as a result
// of a restat.
status_->PlanHasTotalEdges(plan_.command_edge_count());
-
- output_mtime = restat_mtime;
}
}
@@ -832,7 +838,7 @@ bool Builder::FinishCommand(CommandRunner::Result* result, string* err) {
if (scan_.build_log()) {
if (!scan_.build_log()->RecordCommand(edge, start_time_millis,
- end_time_millis, output_mtime)) {
+ end_time_millis, record_mtime)) {
*err = string("Error writing to build log: ") + strerror(errno);
return false;
}
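
The build.cc changes above capture an edge's command start time by touching a small `.ninja_lock` file and reading its mtime back, and that timestamp (rather than the outputs' final mtime) is what gets written to the build log. A standalone sketch of the idea, using plain POSIX calls instead of Ninja's DiskInterface; the function name and fallback behaviour are illustrative:

```
#include <cstdio>
#include <ctime>
#include <sys/stat.h>

// Touch a scratch file and return its mtime. That timestamp approximates the
// filesystem clock at the moment the command started, and stands in for the
// mtime recorded for the edge's outputs.
time_t CaptureCommandStartTime(const char* lock_path) {
  FILE* f = std::fopen(lock_path, "w");  // create/truncate the lock file
  if (!f)
    return 0;                            // fall back to "unknown" on error
  std::fclose(f);
  struct stat st;
  if (stat(lock_path, &st) != 0)
    return 0;
  return st.st_mtime;
}

int main() {
  time_t start = CaptureCommandStartTime(".ninja_lock");
  std::printf("recorded mtime for outputs: %lld\n",
              static_cast<long long>(start));
  return 0;
}
```

Recording the start time instead of the outputs' final mtime is what lets a later run notice an input that was modified while the command was still running.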
diff --git a/src/build.h b/src/build.h
index d697dfb..d727a8a 100644
--- a/src/build.h
+++ b/src/build.h
@@ -234,6 +234,7 @@ struct Builder {
/// Time the build started.
int64_t start_time_millis_;
+ std::string lock_file_path_;
DiskInterface* disk_interface_;
DependencyScan scan_;
diff --git a/src/build_log.cc b/src/build_log.cc
index 4dcd6ce..b35279d 100644
--- a/src/build_log.cc
+++ b/src/build_log.cc
@@ -116,9 +116,9 @@ BuildLog::LogEntry::LogEntry(const string& output)
: output(output) {}
BuildLog::LogEntry::LogEntry(const string& output, uint64_t command_hash,
- int start_time, int end_time, TimeStamp restat_mtime)
+ int start_time, int end_time, TimeStamp mtime)
: output(output), command_hash(command_hash),
- start_time(start_time), end_time(end_time), mtime(restat_mtime)
+ start_time(start_time), end_time(end_time), mtime(mtime)
{}
BuildLog::BuildLog()
@@ -303,7 +303,7 @@ LoadStatus BuildLog::Load(const string& path, string* err) {
*end = 0;
int start_time = 0, end_time = 0;
- TimeStamp restat_mtime = 0;
+ TimeStamp mtime = 0;
start_time = atoi(start);
start = end + 1;
@@ -319,7 +319,7 @@ LoadStatus BuildLog::Load(const string& path, string* err) {
if (!end)
continue;
*end = 0;
- restat_mtime = strtoll(start, NULL, 10);
+ mtime = strtoll(start, NULL, 10);
start = end + 1;
end = (char*)memchr(start, kFieldSeparator, line_end - start);
@@ -343,7 +343,7 @@ LoadStatus BuildLog::Load(const string& path, string* err) {
entry->start_time = start_time;
entry->end_time = end_time;
- entry->mtime = restat_mtime;
+ entry->mtime = mtime;
if (log_version >= 5) {
char c = *end; *end = '\0';
entry->command_hash = (uint64_t)strtoull(start, NULL, 16);
diff --git a/src/build_log.h b/src/build_log.h
index 88551e3..dd72c4c 100644
--- a/src/build_log.h
+++ b/src/build_log.h
@@ -73,7 +73,7 @@ struct BuildLog {
explicit LogEntry(const std::string& output);
LogEntry(const std::string& output, uint64_t command_hash,
- int start_time, int end_time, TimeStamp restat_mtime);
+ int start_time, int end_time, TimeStamp mtime);
};
/// Lookup a previously-run command by its output path.
diff --git a/src/build_log_test.cc b/src/build_log_test.cc
index 3718299..f03100d 100644
--- a/src/build_log_test.cc
+++ b/src/build_log_test.cc
@@ -133,9 +133,13 @@ TEST_F(BuildLogTest, Truncate) {
log1.RecordCommand(state_.edges_[1], 20, 25);
log1.Close();
}
-
+#ifdef __USE_LARGEFILE64
+ struct stat64 statbuf;
+ ASSERT_EQ(0, stat64(kTestFilename, &statbuf));
+#else
struct stat statbuf;
ASSERT_EQ(0, stat(kTestFilename, &statbuf));
+#endif
ASSERT_GT(statbuf.st_size, 0);
// For all possible truncations of the input file, assert that we don't
diff --git a/src/build_test.cc b/src/build_test.cc
index 4ef62b2..3908761 100644
--- a/src/build_test.cc
+++ b/src/build_test.cc
@@ -611,6 +611,7 @@ bool FakeCommandRunner::StartCommand(Edge* edge) {
fs_->WriteFile(edge->outputs_[0]->path(), content);
} else if (edge->rule().name() == "touch-implicit-dep-out") {
string dep = edge->GetBinding("test_dependency");
+ fs_->Tick();
fs_->Create(dep, "");
fs_->Tick();
for (vector<Node*>::iterator out = edge->outputs_.begin();
@@ -627,7 +628,12 @@ bool FakeCommandRunner::StartCommand(Edge* edge) {
fs_->Create(dep, "");
} else if (edge->rule().name() == "generate-depfile") {
string dep = edge->GetBinding("test_dependency");
+ bool touch_dep = edge->GetBindingBool("touch_dependency");
string depfile = edge->GetUnescapedDepfile();
+ if (touch_dep) {
+ fs_->Tick();
+ fs_->Create(dep, "");
+ }
string contents;
for (vector<Node*>::iterator out = edge->outputs_.begin();
out != edge->outputs_.end(); ++out) {
@@ -635,6 +641,20 @@ bool FakeCommandRunner::StartCommand(Edge* edge) {
fs_->Create((*out)->path(), "");
}
fs_->Create(depfile, contents);
+ } else if (edge->rule().name() == "long-cc") {
+ string dep = edge->GetBinding("test_dependency");
+ string depfile = edge->GetUnescapedDepfile();
+ string contents;
+ for (vector<Node*>::iterator out = edge->outputs_.begin();
+ out != edge->outputs_.end(); ++out) {
+ fs_->Tick();
+ fs_->Tick();
+ fs_->Tick();
+ fs_->Create((*out)->path(), "");
+ contents += (*out)->path() + ": " + dep + "\n";
+ }
+ if (!dep.empty() && !depfile.empty())
+ fs_->Create(depfile, contents);
} else {
printf("unknown command\n");
return false;
@@ -690,6 +710,18 @@ bool FakeCommandRunner::WaitForCommand(Result* result) {
else
result->status = ExitSuccess;
+ // This rule simulates an external process modifying files while the build command runs.
+ // See TestInputMtimeRaceCondition and TestInputMtimeRaceConditionWithDepFile.
+ // Note: only the first and third time the rule is run per test is the file modified, so
+ // the test can verify that subsequent runs without the race have no work to do.
+ if (edge->rule().name() == "long-cc") {
+ string dep = edge->GetBinding("test_dependency");
+ if (fs_->now_ == 4)
+ fs_->files_[dep].mtime = 3;
+ if (fs_->now_ == 10)
+ fs_->files_[dep].mtime = 9;
+ }
+
// Provide a way for test cases to verify when an edge finishes that
// some other edge is still active. This is useful for test cases
// covering behavior involving multiple active edges.
@@ -1471,7 +1503,7 @@ TEST_F(BuildWithLogTest, ImplicitGeneratedOutOfDate) {
TEST_F(BuildWithLogTest, ImplicitGeneratedOutOfDate2) {
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"rule touch-implicit-dep-out\n"
-" command = touch $test_dependency ; sleep 1 ; touch $out\n"
+" command = sleep 1 ; touch $test_dependency ; sleep 1 ; touch $out\n"
" generator = 1\n"
"build out.imp: touch-implicit-dep-out | inimp inimp2\n"
" test_dependency = inimp\n"));
@@ -1497,6 +1529,29 @@ TEST_F(BuildWithLogTest, ImplicitGeneratedOutOfDate2) {
EXPECT_TRUE(builder_.AddTarget("out.imp", &err));
EXPECT_TRUE(builder_.AlreadyUpToDate());
EXPECT_FALSE(GetNode("out.imp")->dirty());
+
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ builder_.Cleanup();
+ builder_.plan_.Reset();
+
+ fs_.Tick();
+ fs_.Create("inimp", "");
+
+ EXPECT_TRUE(builder_.AddTarget("out.imp", &err));
+ EXPECT_FALSE(builder_.AlreadyUpToDate());
+
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_TRUE(builder_.AlreadyUpToDate());
+
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ builder_.Cleanup();
+ builder_.plan_.Reset();
+
+ EXPECT_TRUE(builder_.AddTarget("out.imp", &err));
+ EXPECT_TRUE(builder_.AlreadyUpToDate());
+ EXPECT_FALSE(GetNode("out.imp")->dirty());
}
TEST_F(BuildWithLogTest, NotInLogButOnDisk) {
@@ -1800,6 +1855,52 @@ TEST_F(BuildWithLogTest, RestatMissingInput) {
ASSERT_EQ(restat_mtime, log_entry->mtime);
}
+TEST_F(BuildWithLogTest, RestatInputChangesDueToRule) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule generate-depfile\n"
+" command = sleep 1 ; touch $touch_dependency; touch $out ; echo \"$out: $test_dependency\" > $depfile\n"
+"build out1: generate-depfile || cat1\n"
+" test_dependency = in2\n"
+" touch_dependency = 1\n"
+" restat = 1\n"
+" depfile = out.d\n"));
+
+ // Perform the first build. out1 is a restat rule, so its recorded mtime in the build
+ // log should be the time the command completes, not the time the command started. One
+ // of out1's discovered dependencies will have a newer mtime than when out1 started
+ // running, due to its command touching the dependency itself.
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+ EXPECT_EQ(2u, command_runner_.commands_ran_.size());
+ EXPECT_EQ(2u, builder_.plan_.command_edge_count());
+ BuildLog::LogEntry* log_entry = build_log_.LookupByOutput("out1");
+ ASSERT_TRUE(NULL != log_entry);
+ ASSERT_EQ(2u, log_entry->mtime);
+
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ builder_.Cleanup();
+ builder_.plan_.Reset();
+
+ fs_.Tick();
+ fs_.Create("in1", "");
+
+ // Touching a dependency of an order-only dependency of out1 should not cause out1 to
+ // rebuild. If out1 were not a restat rule, then it would rebuild here because its
+ // recorded mtime would have been an earlier mtime than its most recent input's (in2)
+ // mtime
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(!state_.GetNode("out1", 0)->dirty());
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+ EXPECT_EQ(1u, command_runner_.commands_ran_.size());
+ EXPECT_EQ(1u, builder_.plan_.command_edge_count());
+}
+
TEST_F(BuildWithLogTest, GeneratedPlainDepfileMtime) {
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"rule generate-depfile\n"
@@ -1904,10 +2005,11 @@ TEST_F(BuildTest, RspFileSuccess)
EXPECT_TRUE(builder_.Build(&err));
ASSERT_EQ(3u, command_runner_.commands_ran_.size());
- // The RSP files were created
- ASSERT_EQ(files_created + 2, fs_.files_created_.size());
+ // The RSP files and temp file to acquire output mtimes were created
+ ASSERT_EQ(files_created + 3, fs_.files_created_.size());
ASSERT_EQ(1u, fs_.files_created_.count("out 2.rsp"));
ASSERT_EQ(1u, fs_.files_created_.count("out 3.rsp"));
+ ASSERT_EQ(1u, fs_.files_created_.count(".ninja_lock"));
// The RSP files were removed
ASSERT_EQ(files_removed + 2, fs_.files_removed_.size());
@@ -1941,9 +2043,10 @@ TEST_F(BuildTest, RspFileFailure) {
ASSERT_EQ("subcommand failed", err);
ASSERT_EQ(1u, command_runner_.commands_ran_.size());
- // The RSP file was created
- ASSERT_EQ(files_created + 1, fs_.files_created_.size());
+ // The RSP file and temp file to acquire output mtimes were created
+ ASSERT_EQ(files_created + 2, fs_.files_created_.size());
ASSERT_EQ(1u, fs_.files_created_.count("out.rsp"));
+ ASSERT_EQ(1u, fs_.files_created_.count(".ninja_lock"));
// The RSP file was NOT removed
ASSERT_EQ(files_removed, fs_.files_removed_.size());
@@ -2522,6 +2625,210 @@ TEST_F(BuildWithDepsLogTest, DepsIgnoredInDryRun) {
builder.command_runner_.release();
}
+TEST_F(BuildWithDepsLogTest, TestInputMtimeRaceCondition) {
+ string err;
+ const char* manifest =
+ "rule long-cc\n"
+ " command = long-cc\n"
+ "build out: long-cc in1\n"
+ " test_dependency = in1\n";
+
+ State state;
+ ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+ BuildLog build_log;
+ ASSERT_TRUE(build_log.Load("build_log", &err));
+ ASSERT_TRUE(build_log.OpenForWrite("build_log", *this, &err));
+
+ DepsLog deps_log;
+ ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
+ ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+
+ BuildLog::LogEntry* log_entry = NULL;
+ {
+ Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+ builder.command_runner_.reset(&command_runner_);
+ command_runner_.commands_ran_.clear();
+
+ // Run the build, out gets built, dep file is created
+ EXPECT_TRUE(builder.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder.Build(&err));
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+ // See that an entry in the logfile is created. The input_mtime is 1 since that was
+ // the mtime of in1 when the command was started
+ log_entry = build_log.LookupByOutput("out");
+ ASSERT_TRUE(NULL != log_entry);
+ ASSERT_EQ(1u, log_entry->mtime);
+
+ builder.command_runner_.release();
+ }
+
+ {
+ Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+ builder.command_runner_.reset(&command_runner_);
+ command_runner_.commands_ran_.clear();
+
+ // Trigger the build again - "out" should rebuild despite having a newer mtime than
+ // "in1", since "in1" was touched during the build of out (simulated by changing its
+ // mtime in the test builder's WaitForCommand(), which runs before FinishCommand())
+ command_runner_.commands_ran_.clear();
+ state.Reset();
+ EXPECT_TRUE(builder.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder.Build(&err));
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+ // Check that the logfile entry is still correct
+ log_entry = build_log.LookupByOutput("out");
+ ASSERT_TRUE(NULL != log_entry);
+ ASSERT_TRUE(fs_.files_["in1"].mtime < log_entry->mtime);
+ builder.command_runner_.release();
+ }
+
+ {
+ Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+ builder.command_runner_.reset(&command_runner_);
+ command_runner_.commands_ran_.clear();
+
+ // And a subsequent run should not have any work to do
+ command_runner_.commands_ran_.clear();
+ state.Reset();
+ EXPECT_TRUE(builder.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder.AlreadyUpToDate());
+
+ builder.command_runner_.release();
+ }
+}
+
+TEST_F(BuildWithDepsLogTest, TestInputMtimeRaceConditionWithDepFile) {
+ string err;
+ const char* manifest =
+ "rule long-cc\n"
+ " command = long-cc\n"
+ "build out: long-cc\n"
+ " deps = gcc\n"
+ " depfile = out.d\n"
+ " test_dependency = header.h\n";
+
+ fs_.Create("header.h", "");
+
+ State state;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+ BuildLog build_log;
+ ASSERT_TRUE(build_log.Load("build_log", &err));
+ ASSERT_TRUE(build_log.OpenForWrite("build_log", *this, &err));
+
+ DepsLog deps_log;
+ ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
+ ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+
+ {
+ Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+ builder.command_runner_.reset(&command_runner_);
+
+ // Run the build, out gets built, dep file is created
+ EXPECT_TRUE(builder.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder.Build(&err));
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+ // See that an entry in the logfile is created. The mtime is 1 due to the command
+ // starting when the file system's mtime was 1.
+ BuildLog::LogEntry* log_entry = build_log.LookupByOutput("out");
+ ASSERT_TRUE(NULL != log_entry);
+ ASSERT_EQ(1u, log_entry->mtime);
+
+ builder.command_runner_.release();
+ }
+
+ {
+ // Trigger the build again - "out" will rebuild since its newest input mtime (header.h)
+ // is newer than the recorded mtime of out in the build log
+ Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+ builder.command_runner_.reset(&command_runner_);
+ command_runner_.commands_ran_.clear();
+
+ state.Reset();
+ EXPECT_TRUE(builder.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder.Build(&err));
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+ builder.command_runner_.release();
+ }
+
+ {
+ // Trigger the build again - "out" won't rebuild since the file wasn't updated during
+ // the previous build
+ Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+ builder.command_runner_.reset(&command_runner_);
+ command_runner_.commands_ran_.clear();
+
+ state.Reset();
+ EXPECT_TRUE(builder.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ ASSERT_TRUE(builder.AlreadyUpToDate());
+
+ builder.command_runner_.release();
+ }
+
+ // touch the header to trigger a rebuild
+ fs_.Create("header.h", "");
+ ASSERT_EQ(fs_.now_, 7);
+
+ {
+ // Rebuild. This time, long-cc will cause header.h to be updated while the build is
+ // in progress
+ Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+ builder.command_runner_.reset(&command_runner_);
+ command_runner_.commands_ran_.clear();
+
+ state.Reset();
+ EXPECT_TRUE(builder.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder.Build(&err));
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+ builder.command_runner_.release();
+ }
+
+ {
+ // Rebuild. Because header.h is now in the deplog for out, it should be detectable as
+ // a change-while-in-progress and should cause a rebuild of out.
+ Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+ builder.command_runner_.reset(&command_runner_);
+ command_runner_.commands_ran_.clear();
+
+ state.Reset();
+ EXPECT_TRUE(builder.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder.Build(&err));
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+ builder.command_runner_.release();
+ }
+
+ {
+ // This time, the header.h file was not updated during the build, so the target should
+ // not be considered dirty.
+ Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+ builder.command_runner_.reset(&command_runner_);
+ command_runner_.commands_ran_.clear();
+
+ state.Reset();
+ EXPECT_TRUE(builder.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder.AlreadyUpToDate());
+
+ builder.command_runner_.release();
+ }
+}
+
/// Check that a restat rule generating a header cancels compilations correctly.
TEST_F(BuildTest, RestatDepfileDependency) {
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
@@ -3042,9 +3349,10 @@ TEST_F(BuildTest, DyndepBuild) {
ASSERT_EQ(2u, fs_.files_read_.size());
EXPECT_EQ("dd-in", fs_.files_read_[0]);
EXPECT_EQ("dd", fs_.files_read_[1]);
- ASSERT_EQ(2u + files_created, fs_.files_created_.size());
+ ASSERT_EQ(3u + files_created, fs_.files_created_.size());
EXPECT_EQ(1u, fs_.files_created_.count("dd"));
EXPECT_EQ(1u, fs_.files_created_.count("out"));
+ EXPECT_EQ(1u, fs_.files_created_.count(".ninja_lock"));
}
TEST_F(BuildTest, DyndepBuildSyntaxError) {
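
The long-cc tests above simulate an external process touching an input while the consuming command is still running. A toy timeline with integer mtimes (the numbers are invented, not taken from the tests) shows why logging the command start time closes the race that logging the output mtime leaves open:

```
#include <cassert>

// Toy timeline of the race the long-cc tests simulate (integer "mtimes"):
//   t=1  command for "out" starts
//   t=3  an external process touches input "in1"
//   t=5  command finishes and writes "out"
int main() {
  const int command_start = 1, input_touched = 3, output_written = 5;

  // Old behaviour: the build log stored the output's mtime (5). On the next
  // run, 3 is not newer than 5, so the touched input goes unnoticed and
  // "out" looks clean.
  bool dirty_with_output_mtime = input_touched > output_written;
  assert(!dirty_with_output_mtime);

  // New behaviour: the log stores the command start time (1). 3 is newer
  // than 1, so the next run correctly rebuilds "out".
  bool dirty_with_start_time = input_touched > command_start;
  assert(dirty_with_start_time);
  return 0;
}
```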
diff --git a/src/deps_log.cc b/src/deps_log.cc
index 7e48b38..e32a7a9 100644
--- a/src/deps_log.cc
+++ b/src/deps_log.cc
@@ -361,7 +361,7 @@ bool DepsLog::Recompact(const string& path, string* err) {
return true;
}
-bool DepsLog::IsDepsEntryLiveFor(Node* node) {
+bool DepsLog::IsDepsEntryLiveFor(const Node* node) {
// Skip entries that don't have in-edges or whose edges don't have a
// "deps" attribute. They were in the deps log from previous builds, but
// the files they were for were removed from the build and their deps
diff --git a/src/deps_log.h b/src/deps_log.h
index 09cc41c..2a1b188 100644
--- a/src/deps_log.h
+++ b/src/deps_log.h
@@ -97,7 +97,7 @@ struct DepsLog {
/// past but are no longer part of the manifest. This function returns if
/// this is the case for a given node. This function is slow, don't call
/// it from code that runs on every build.
- bool IsDepsEntryLiveFor(Node* node);
+ static bool IsDepsEntryLiveFor(const Node* node);
/// Used for tests.
const std::vector<Node*>& nodes() const { return nodes_; }
diff --git a/src/deps_log_test.cc b/src/deps_log_test.cc
index 13fcc78..cb1c925 100644
--- a/src/deps_log_test.cc
+++ b/src/deps_log_test.cc
@@ -138,9 +138,13 @@ TEST_F(DepsLogTest, DoubleEntry) {
deps.push_back(state.GetNode("bar.h", 0));
log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
log.Close();
-
+#ifdef __USE_LARGEFILE64
+ struct stat64 st;
+ ASSERT_EQ(0, stat64(kTestFilename, &st));
+#else
struct stat st;
ASSERT_EQ(0, stat(kTestFilename, &st));
+#endif
file_size = (int)st.st_size;
ASSERT_GT(file_size, 0);
}
@@ -160,9 +164,13 @@ TEST_F(DepsLogTest, DoubleEntry) {
deps.push_back(state.GetNode("bar.h", 0));
log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
log.Close();
-
+#ifdef __USE_LARGEFILE64
+ struct stat64 st;
+ ASSERT_EQ(0, stat64(kTestFilename, &st));
+#else
struct stat st;
ASSERT_EQ(0, stat(kTestFilename, &st));
+#endif
int file_size_2 = (int)st.st_size;
ASSERT_EQ(file_size, file_size_2);
}
@@ -198,9 +206,13 @@ TEST_F(DepsLogTest, Recompact) {
log.RecordDeps(state.GetNode("other_out.o", 0), 1, deps);
log.Close();
-
+#ifdef __USE_LARGEFILE64
+ struct stat64 st;
+ ASSERT_EQ(0, stat64(kTestFilename, &st));
+#else
struct stat st;
ASSERT_EQ(0, stat(kTestFilename, &st));
+#endif
file_size = (int)st.st_size;
ASSERT_GT(file_size, 0);
}
@@ -222,8 +234,13 @@ TEST_F(DepsLogTest, Recompact) {
log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
log.Close();
+#ifdef __USE_LARGEFILE64
+ struct stat64 st;
+ ASSERT_EQ(0, stat64(kTestFilename, &st));
+#else
struct stat st;
ASSERT_EQ(0, stat(kTestFilename, &st));
+#endif
file_size_2 = (int)st.st_size;
// The file should grow to record the new deps.
ASSERT_GT(file_size_2, file_size);
@@ -273,8 +290,13 @@ TEST_F(DepsLogTest, Recompact) {
ASSERT_EQ(other_out, log.nodes()[other_out->id()]);
// The file should have shrunk a bit for the smaller deps.
+#ifdef __USE_LARGEFILE64
+ struct stat64 st;
+ ASSERT_EQ(0, stat64(kTestFilename, &st));
+#else
struct stat st;
ASSERT_EQ(0, stat(kTestFilename, &st));
+#endif
file_size_3 = (int)st.st_size;
ASSERT_LT(file_size_3, file_size_2);
}
@@ -317,8 +339,13 @@ TEST_F(DepsLogTest, Recompact) {
ASSERT_EQ(-1, state.LookupNode("baz.h")->id());
// The file should have shrunk more.
+#ifdef __USE_LARGEFILE64
+ struct stat64 st;
+ ASSERT_EQ(0, stat64(kTestFilename, &st));
+#else
struct stat st;
ASSERT_EQ(0, stat(kTestFilename, &st));
+#endif
int file_size_4 = (int)st.st_size;
ASSERT_LT(file_size_4, file_size_3);
}
@@ -374,8 +401,13 @@ TEST_F(DepsLogTest, Truncated) {
}
// Get the file size.
+#ifdef __USE_LARGEFILE64
+ struct stat64 st;
+ ASSERT_EQ(0, stat64(kTestFilename, &st));
+#else
struct stat st;
ASSERT_EQ(0, stat(kTestFilename, &st));
+#endif
// Try reloading at truncated sizes.
// Track how many nodes/deps were found; they should decrease with
@@ -434,8 +466,13 @@ TEST_F(DepsLogTest, TruncatedRecovery) {
// Shorten the file, corrupting the last record.
{
+#ifdef __USE_LARGEFILE64
+ struct stat64 st;
+ ASSERT_EQ(0, stat64(kTestFilename, &st));
+#else
struct stat st;
ASSERT_EQ(0, stat(kTestFilename, &st));
+#endif
string err;
ASSERT_TRUE(Truncate(kTestFilename, st.st_size - 2, &err));
}
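
The `__USE_LARGEFILE64` conditional added above is repeated before every stat call in these tests (and in build_log_test.cc). A hypothetical helper, not part of this change, could centralize the choice; `TestFileSize` and its error convention are assumptions made only for this sketch:

```
#include <sys/stat.h>
#include <sys/types.h>

// Return the size of |path|, using stat64 where glibc exposes it and plain
// stat otherwise, so individual tests would not need to repeat the #ifdef.
static long long TestFileSize(const char* path) {
#ifdef __USE_LARGEFILE64
  struct stat64 st;
  if (stat64(path, &st) != 0)
    return -1;
#else
  struct stat st;
  if (stat(path, &st) != 0)
    return -1;
#endif
  return static_cast<long long>(st.st_size);
}

int main() {
  return TestFileSize("build.ninja") >= 0 ? 0 : 1;  // trivial smoke use
}
```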
diff --git a/src/disk_interface.cc b/src/disk_interface.cc
index e73d901..7277c3e 100644
--- a/src/disk_interface.cc
+++ b/src/disk_interface.cc
@@ -195,8 +195,13 @@ TimeStamp RealDiskInterface::Stat(const string& path, string* err) const {
DirCache::iterator di = ci->second.find(base);
return di != ci->second.end() ? di->second : 0;
#else
+#ifdef __USE_LARGEFILE64
+ struct stat64 st;
+ if (stat64(path.c_str(), &st) < 0) {
+#else
struct stat st;
if (stat(path.c_str(), &st) < 0) {
+#endif
if (errno == ENOENT || errno == ENOTDIR)
return 0;
*err = "stat(" + path + "): " + strerror(errno);
@@ -267,7 +272,7 @@ FileReader::Status RealDiskInterface::ReadFile(const string& path,
int RealDiskInterface::RemoveFile(const string& path) {
#ifdef _WIN32
- DWORD attributes = GetFileAttributes(path.c_str());
+ DWORD attributes = GetFileAttributesA(path.c_str());
if (attributes == INVALID_FILE_ATTRIBUTES) {
DWORD win_err = GetLastError();
if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND) {
@@ -278,7 +283,7 @@ int RealDiskInterface::RemoveFile(const string& path) {
// On Windows Ninja should behave the same:
// https://github.com/ninja-build/ninja/issues/1886
// Skip error checking. If this fails, accept whatever happens below.
- SetFileAttributes(path.c_str(), attributes & ~FILE_ATTRIBUTE_READONLY);
+ SetFileAttributesA(path.c_str(), attributes & ~FILE_ATTRIBUTE_READONLY);
}
if (attributes & FILE_ATTRIBUTE_DIRECTORY) {
// remove() deletes both files and directories. On Windows we have to
@@ -286,7 +291,7 @@ int RealDiskInterface::RemoveFile(const string& path) {
// used on a directory)
// This fixes the behavior of ninja -t clean in some cases
// https://github.com/ninja-build/ninja/issues/828
- if (!RemoveDirectory(path.c_str())) {
+ if (!RemoveDirectoryA(path.c_str())) {
DWORD win_err = GetLastError();
if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND) {
return 1;
@@ -296,7 +301,7 @@ int RealDiskInterface::RemoveFile(const string& path) {
return -1;
}
} else {
- if (!DeleteFile(path.c_str())) {
+ if (!DeleteFileA(path.c_str())) {
DWORD win_err = GetLastError();
if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND) {
return 1;
diff --git a/src/disk_interface_test.cc b/src/disk_interface_test.cc
index 5e952ed..7041d98 100644
--- a/src/disk_interface_test.cc
+++ b/src/disk_interface_test.cc
@@ -198,7 +198,7 @@ TEST_F(DiskInterfaceTest, MakeDirs) {
EXPECT_EQ(0, fclose(f));
#ifdef _WIN32
string path2 = "another\\with\\back\\\\slashes\\";
- EXPECT_TRUE(disk_.MakeDirs(path2.c_str()));
+ EXPECT_TRUE(disk_.MakeDirs(path2));
FILE* f2 = fopen((path2 + "a_file").c_str(), "w");
EXPECT_TRUE(f2);
EXPECT_EQ(0, fclose(f2));
diff --git a/src/graph.cc b/src/graph.cc
index 8d58587..95fc1dc 100644
--- a/src/graph.cc
+++ b/src/graph.cc
@@ -298,37 +298,34 @@ bool DependencyScan::RecomputeOutputDirty(const Edge* edge,
return false;
}
- BuildLog::LogEntry* entry = 0;
-
// Dirty if we're missing the output.
if (!output->exists()) {
EXPLAIN("output %s doesn't exist", output->path().c_str());
return true;
}
- // Dirty if the output is older than the input.
- if (most_recent_input && output->mtime() < most_recent_input->mtime()) {
- TimeStamp output_mtime = output->mtime();
-
- // If this is a restat rule, we may have cleaned the output with a restat
- // rule in a previous run and stored the most recent input mtime in the
- // build log. Use that mtime instead, so that the file will only be
- // considered dirty if an input was modified since the previous run.
- bool used_restat = false;
- if (edge->GetBindingBool("restat") && build_log() &&
- (entry = build_log()->LookupByOutput(output->path()))) {
- output_mtime = entry->mtime;
- used_restat = true;
- }
+ BuildLog::LogEntry* entry = 0;
- if (output_mtime < most_recent_input->mtime()) {
- EXPLAIN("%soutput %s older than most recent input %s "
- "(%" PRId64 " vs %" PRId64 ")",
- used_restat ? "restat of " : "", output->path().c_str(),
- most_recent_input->path().c_str(),
- output_mtime, most_recent_input->mtime());
- return true;
- }
+ // If this is a restat rule, we may have cleaned the output in a
+ // previous run and stored the command start time in the build log.
+ // We don't want to consider a restat rule's outputs as dirty unless
+ // an input changed since the last run, so we'll skip checking the
+ // output file's actual mtime and simply check the recorded mtime from
+ // the log against the most recent input's mtime (see below)
+ bool used_restat = false;
+ if (edge->GetBindingBool("restat") && build_log() &&
+ (entry = build_log()->LookupByOutput(output->path()))) {
+ used_restat = true;
+ }
+
+ // Dirty if the output is older than the input.
+ if (!used_restat && most_recent_input && output->mtime() < most_recent_input->mtime()) {
+ EXPLAIN("output %s older than most recent input %s "
+ "(%" PRId64 " vs %" PRId64 ")",
+ output->path().c_str(),
+ most_recent_input->path().c_str(),
+ output->mtime(), most_recent_input->mtime());
+ return true;
}
if (build_log()) {
@@ -346,7 +343,9 @@ bool DependencyScan::RecomputeOutputDirty(const Edge* edge,
// May also be dirty due to the mtime in the log being older than the
// mtime of the most recent input. This can occur even when the mtime
// on disk is newer if a previous run wrote to the output file but
- // exited with an error or was interrupted.
+ // exited with an error or was interrupted. If this was a restat rule,
+ // then we only check the recorded mtime against the most recent input
+ // mtime and ignore the actual output's mtime above.
EXPLAIN("recorded mtime of %s older than most recent input %s (%" PRId64 " vs %" PRId64 ")",
output->path().c_str(), most_recent_input->path().c_str(),
entry->mtime, most_recent_input->mtime());
@@ -403,11 +402,7 @@ string EdgeEnv::LookupVariable(const string& var) {
if (var == "in" || var == "in_newline") {
int explicit_deps_count = edge_->inputs_.size() - edge_->implicit_deps_ -
edge_->order_only_deps_;
-#if __cplusplus >= 201103L
return MakePathList(edge_->inputs_.data(), explicit_deps_count,
-#else
- return MakePathList(&edge_->inputs_[0], explicit_deps_count,
-#endif
var == "in" ? ' ' : '\n');
} else if (var == "out") {
int explicit_outs_count = edge_->outputs_.size() - edge_->implicit_outs_;
@@ -456,6 +451,28 @@ std::string EdgeEnv::MakePathList(const Node* const* const span,
return result;
}
+void Edge::CollectInputs(bool shell_escape,
+ std::vector<std::string>* out) const {
+ for (std::vector<Node*>::const_iterator it = inputs_.begin();
+ it != inputs_.end(); ++it) {
+ std::string path = (*it)->PathDecanonicalized();
+ if (shell_escape) {
+ std::string unescaped;
+ unescaped.swap(path);
+#ifdef _WIN32
+ GetWin32EscapedString(unescaped, &path);
+#else
+ GetShellEscapedString(unescaped, &path);
+#endif
+ }
+#if __cplusplus >= 201103L
+ out->push_back(std::move(path));
+#else
+ out->push_back(path);
+#endif
+ }
+}
+
std::string Edge::EvaluateCommand(const bool incl_rsp_file) const {
string command = GetBinding("command");
if (incl_rsp_file) {
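
Condensed, the dirty check that the rewritten RecomputeOutputDirty comments describe compares the newest input against either the output's on-disk mtime or, for restat rules with a build-log entry, only the recorded mtime. The sketch below restates that decision; the free function and its parameters are invented for illustration, and Ninja's real code additionally handles missing outputs, generator rules, and command-hash mismatches:

```
#include <cstdint>
#include <cstdio>

typedef int64_t TimeStamp;

// Decide whether an existing output is dirty relative to its newest input.
// |has_log_entry| and |recorded_mtime| stand in for a build-log lookup.
bool OutputOlderThanInput(TimeStamp output_mtime, TimeStamp most_recent_input,
                          bool restat, bool has_log_entry,
                          TimeStamp recorded_mtime) {
  bool used_restat = restat && has_log_entry;
  // Non-restat rules (or restat rules with no log entry) compare the output's
  // actual mtime against the newest input.
  if (!used_restat && output_mtime < most_recent_input)
    return true;
  // With a log entry, the recorded mtime is compared instead; after this
  // change that value is the command start time, not the output's final mtime.
  if (has_log_entry && recorded_mtime < most_recent_input)
    return true;
  return false;
}

int main() {
  // Restat rule whose output file is older than the input on disk, but whose
  // recorded (start-time) mtime is newer: not dirty.
  std::printf("%d\n", OutputOlderThanInput(5, 7, true, true, 9));  // prints 0
  return 0;
}
```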
diff --git a/src/graph.h b/src/graph.h
index 141b439..d07a9b7 100644
--- a/src/graph.h
+++ b/src/graph.h
@@ -172,7 +172,8 @@ struct Edge {
: rule_(NULL), pool_(NULL), dyndep_(NULL), env_(NULL), mark_(VisitNone),
id_(0), outputs_ready_(false), deps_loaded_(false),
deps_missing_(false), generated_by_dep_loader_(false),
- implicit_deps_(0), order_only_deps_(0), implicit_outs_(0) {}
+ command_start_time_(0), implicit_deps_(0), order_only_deps_(0),
+ implicit_outs_(0) {}
/// Return true if all inputs' in-edges are ready.
bool AllInputsReady() const;
@@ -195,6 +196,9 @@ struct Edge {
void Dump(const char* prefix="") const;
+ // Append all edge explicit inputs to |*out|. Possibly with shell escaping.
+ void CollectInputs(bool shell_escape, std::vector<std::string>* out) const;
+
const Rule* rule_;
Pool* pool_;
std::vector<Node*> inputs_;
@@ -208,6 +212,7 @@ struct Edge {
bool deps_loaded_;
bool deps_missing_;
bool generated_by_dep_loader_;
+ TimeStamp command_start_time_;
const Rule& rule() const { return *rule_; }
Pool* pool() const { return pool_; }
diff --git a/src/graph_test.cc b/src/graph_test.cc
index 5314bc5..9dba8af 100644
--- a/src/graph_test.cc
+++ b/src/graph_test.cc
@@ -215,6 +215,39 @@ TEST_F(GraphTest, RootNodes) {
}
}
+TEST_F(GraphTest, CollectInputs) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+ &state_,
+ "build out$ 1: cat in1 in2 in$ with$ space | implicit || order_only\n"));
+
+ std::vector<std::string> inputs;
+ Edge* edge = GetNode("out 1")->in_edge();
+
+ // Test without shell escaping.
+ inputs.clear();
+ edge->CollectInputs(false, &inputs);
+ EXPECT_EQ(5u, inputs.size());
+ EXPECT_EQ("in1", inputs[0]);
+ EXPECT_EQ("in2", inputs[1]);
+ EXPECT_EQ("in with space", inputs[2]);
+ EXPECT_EQ("implicit", inputs[3]);
+ EXPECT_EQ("order_only", inputs[4]);
+
+ // Test with shell escaping.
+ inputs.clear();
+ edge->CollectInputs(true, &inputs);
+ EXPECT_EQ(5u, inputs.size());
+ EXPECT_EQ("in1", inputs[0]);
+ EXPECT_EQ("in2", inputs[1]);
+#ifdef _WIN32
+ EXPECT_EQ("\"in with space\"", inputs[2]);
+#else
+ EXPECT_EQ("'in with space'", inputs[2]);
+#endif
+ EXPECT_EQ("implicit", inputs[3]);
+ EXPECT_EQ("order_only", inputs[4]);
+}
+
TEST_F(GraphTest, VarInOutPathEscaping) {
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"build a$ b: cat no'space with$ space$$ no\"space2\n"));
diff --git a/src/hash_map.h b/src/hash_map.h
index 55d2c9d..4353609 100644
--- a/src/hash_map.h
+++ b/src/hash_map.h
@@ -53,7 +53,6 @@ unsigned int MurmurHash2(const void* key, size_t len) {
return h;
}
-#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900)
#include <unordered_map>
namespace std {
@@ -68,56 +67,13 @@ struct hash<StringPiece> {
};
}
-#elif defined(_MSC_VER)
-#include <hash_map>
-
-using stdext::hash_map;
-using stdext::hash_compare;
-
-struct StringPieceCmp : public hash_compare<StringPiece> {
- size_t operator()(const StringPiece& key) const {
- return MurmurHash2(key.str_, key.len_);
- }
- bool operator()(const StringPiece& a, const StringPiece& b) const {
- int cmp = memcmp(a.str_, b.str_, min(a.len_, b.len_));
- if (cmp < 0) {
- return true;
- } else if (cmp > 0) {
- return false;
- } else {
- return a.len_ < b.len_;
- }
- }
-};
-
-#else
-#include <ext/hash_map>
-
-using __gnu_cxx::hash_map;
-
-namespace __gnu_cxx {
-template<>
-struct hash<StringPiece> {
- size_t operator()(StringPiece key) const {
- return MurmurHash2(key.str_, key.len_);
- }
-};
-}
-#endif
-
/// A template for hash_maps keyed by a StringPiece whose string is
/// owned externally (typically by the values). Use like:
/// ExternalStringHash<Foo*>::Type foos; to make foos into a hash
/// mapping StringPiece => Foo*.
template<typename V>
struct ExternalStringHashMap {
-#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900)
typedef std::unordered_map<StringPiece, V> Type;
-#elif defined(_MSC_VER)
- typedef hash_map<StringPiece, V, StringPieceCmp> Type;
-#else
- typedef hash_map<StringPiece, V> Type;
-#endif
};
#endif // NINJA_MAP_H_
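
With the pre-C++11 branches removed, hash_map.h relies solely on the std::hash<StringPiece> specialization kept in the surrounding context. The same pattern, shown with a stand-in key type and a simple FNV-1a hash in place of MurmurHash2, looks like this (everything here is illustrative, not Ninja code):

```
#include <cstddef>
#include <cstring>
#include <string>
#include <unordered_map>

// Stand-in for Ninja's StringPiece: a non-owning view of externally owned text.
struct Piece {
  const char* str_;
  size_t len_;
  bool operator==(const Piece& o) const {
    return len_ == o.len_ && std::memcmp(str_, o.str_, len_) == 0;
  }
};

namespace std {
template <> struct hash<Piece> {
  size_t operator()(const Piece& p) const {
    // hash_map.h uses MurmurHash2 here; FNV-1a keeps the sketch short.
    unsigned long long h = 1469598103934665603ull;
    for (size_t i = 0; i < p.len_; ++i) {
      h ^= static_cast<unsigned char>(p.str_[i]);
      h *= 1099511628211ull;
    }
    return static_cast<size_t>(h);
  }
};
}  // namespace std

int main() {
  std::string owned = "foo.o";  // keys must outlive the map, as the comment on
                                // ExternalStringHashMap requires
  std::unordered_map<Piece, int> ids;
  ids[Piece{owned.data(), owned.size()}] = 1;
  return ids.count(Piece{owned.data(), owned.size()}) == 1 ? 0 : 1;
}
```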
diff --git a/src/line_printer.cc b/src/line_printer.cc
index a3d0528..54ba883 100644
--- a/src/line_printer.cc
+++ b/src/line_printer.cc
@@ -46,10 +46,6 @@ LinePrinter::LinePrinter() : have_blank_line_(true), console_locked_(false) {
}
#endif
supports_color_ = smart_terminal_;
- if (!supports_color_) {
- const char* clicolor_force = getenv("CLICOLOR_FORCE");
- supports_color_ = clicolor_force && string(clicolor_force) != "0";
- }
#ifdef _WIN32
// Try enabling ANSI escape sequence support on Windows 10 terminals.
if (supports_color_) {
@@ -61,6 +57,10 @@ LinePrinter::LinePrinter() : have_blank_line_(true), console_locked_(false) {
}
}
#endif
+ if (!supports_color_) {
+ const char* clicolor_force = getenv("CLICOLOR_FORCE");
+ supports_color_ = clicolor_force && std::string(clicolor_force) != "0";
+ }
}
void LinePrinter::Print(string to_print, LineType type) {
diff --git a/src/missing_deps.h b/src/missing_deps.h
index ae57074..7a615da 100644
--- a/src/missing_deps.h
+++ b/src/missing_deps.h
@@ -19,9 +19,7 @@
#include <set>
#include <string>
-#if __cplusplus >= 201103L
#include <unordered_map>
-#endif
struct DepsLog;
struct DiskInterface;
@@ -68,13 +66,8 @@ struct MissingDependencyScanner {
int missing_dep_path_count_;
private:
-#if __cplusplus >= 201103L
using InnerAdjacencyMap = std::unordered_map<Edge*, bool>;
using AdjacencyMap = std::unordered_map<Edge*, InnerAdjacencyMap>;
-#else
- typedef std::map<Edge*, bool> InnerAdjacencyMap;
- typedef std::map<Edge*, InnerAdjacencyMap> AdjacencyMap;
-#endif
AdjacencyMap adjacency_map_;
};
diff --git a/src/ninja.cc b/src/ninja.cc
index 71dea21..9ae53de 100644
--- a/src/ninja.cc
+++ b/src/ninja.cc
@@ -17,6 +17,8 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+
+#include <algorithm>
#include <cstdlib>
#ifdef _WIN32
@@ -52,7 +54,7 @@
using namespace std;
-#ifdef _MSC_VER
+#ifdef _WIN32
// Defined in msvc_helper_main-win32.cc.
int MSVCHelperMain(int argc, char** argv);
@@ -449,7 +451,7 @@ int NinjaMain::ToolBrowse(const Options*, int, char**) {
}
#endif
-#if defined(_MSC_VER)
+#if defined(_WIN32)
int NinjaMain::ToolMSVC(const Options* options, int argc, char* argv[]) {
// Reset getopt: push one argument onto the front of argv, reset optind.
argc++;
@@ -530,7 +532,7 @@ int NinjaMain::ToolDeps(const Options* options, int argc, char** argv) {
if (argc == 0) {
for (vector<Node*>::const_iterator ni = deps_log_.nodes().begin();
ni != deps_log_.nodes().end(); ++ni) {
- if (deps_log_.IsDepsEntryLiveFor(*ni))
+ if (DepsLog::IsDepsEntryLiveFor(*ni))
nodes.push_back(*ni);
}
} else {
@@ -744,7 +746,8 @@ int NinjaMain::ToolCommands(const Options* options, int argc, char* argv[]) {
return 0;
}
-void PrintInputs(Edge* edge, set<Edge*>* seen) {
+void CollectInputs(Edge* edge, std::set<Edge*>* seen,
+ std::vector<std::string>* result) {
if (!edge)
return;
if (!seen->insert(edge).second)
@@ -752,13 +755,41 @@ void PrintInputs(Edge* edge, set<Edge*>* seen) {
for (vector<Node*>::iterator in = edge->inputs_.begin();
in != edge->inputs_.end(); ++in)
- PrintInputs((*in)->in_edge(), seen);
+ CollectInputs((*in)->in_edge(), seen, result);
- if (!edge->is_phony())
- puts(edge->GetBinding("in_newline").c_str());
+ if (!edge->is_phony()) {
+ edge->CollectInputs(true, result);
+ }
}
int NinjaMain::ToolInputs(const Options* options, int argc, char* argv[]) {
+ // The inputs tool uses getopt, and expects argv[0] to contain the name of
+ // the tool, i.e. "inputs".
+ argc++;
+ argv--;
+ optind = 1;
+ int opt;
+ const option kLongOptions[] = { { "help", no_argument, NULL, 'h' },
+ { NULL, 0, NULL, 0 } };
+ while ((opt = getopt_long(argc, argv, "h", kLongOptions, NULL)) != -1) {
+ switch (opt) {
+ case 'h':
+ default:
+ // clang-format off
+ printf(
+"Usage '-t inputs [options] [targets]\n"
+"\n"
+"List all inputs used for a set of targets. Note that this includes\n"
+"explicit, implicit and order-only inputs, but not validation ones.\n\n"
+"Options:\n"
+" -h, --help Print this message.\n");
+ // clang-format on
+ return 1;
+ }
+ }
+ argv += optind;
+ argc -= optind;
+
vector<Node*> nodes;
string err;
if (!CollectTargetsFromArgs(argc, argv, &nodes, &err)) {
@@ -766,9 +797,17 @@ int NinjaMain::ToolInputs(const Options* options, int argc, char* argv[]) {
return 1;
}
- set<Edge*> seen;
+ std::set<Edge*> seen;
+ std::vector<std::string> result;
for (vector<Node*>::iterator in = nodes.begin(); in != nodes.end(); ++in)
- PrintInputs((*in)->in_edge(), &seen);
+ CollectInputs((*in)->in_edge(), &seen, &result);
+
+ // Make output deterministic by sorting then removing duplicates.
+ std::sort(result.begin(), result.end());
+ result.erase(std::unique(result.begin(), result.end()), result.end());
+
+ for (size_t n = 0; n < result.size(); ++n)
+ puts(result[n].c_str());
return 0;
}
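
CollectInputs now accumulates paths instead of printing them directly, and the tool sorts the collected vector and drops adjacent duplicates so the output is deterministic regardless of graph traversal order. A standalone sketch of that sort-then-unique step (the sample paths are made up):

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> result = { "b.h", "a.cc", "b.h", "c.h", "a.cc" };
  // std::unique only removes *adjacent* duplicates, so sort first.
  std::sort(result.begin(), result.end());
  result.erase(std::unique(result.begin(), result.end()), result.end());
  for (size_t n = 0; n < result.size(); ++n)
    puts(result[n].c_str());   // a.cc  b.h  c.h
}
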
@@ -1043,8 +1082,8 @@ const Tool* ChooseTool(const string& tool_name) {
static const Tool kTools[] = {
{ "browse", "browse dependency graph in a web browser",
Tool::RUN_AFTER_LOAD, &NinjaMain::ToolBrowse },
-#if defined(_MSC_VER)
- { "msvc", "build helper for MSVC cl.exe (EXPERIMENTAL)",
+#ifdef _WIN32
+ { "msvc", "build helper for MSVC cl.exe (DEPRECATED)",
Tool::RUN_AFTER_FLAGS, &NinjaMain::ToolMSVC },
#endif
{ "clean", "clean built files",
@@ -1355,11 +1394,28 @@ int ExceptionFilter(unsigned int code, struct _EXCEPTION_POINTERS *ep) {
#endif // _MSC_VER
+class DeferGuessParallelism {
+ public:
+ bool needGuess;
+ BuildConfig* config;
+
+ DeferGuessParallelism(BuildConfig* config)
+ : needGuess(true), config(config) {}
+
+ void Refresh() {
+ if (needGuess) {
+ needGuess = false;
+ config->parallelism = GuessParallelism();
+ }
+ }
+ ~DeferGuessParallelism() { Refresh(); }
+};
+
/// Parse argv for command-line options.
/// Returns an exit code, or -1 if Ninja should continue.
int ReadFlags(int* argc, char*** argv,
Options* options, BuildConfig* config) {
- config->parallelism = GuessParallelism();
+ DeferGuessParallelism deferGuessParallelism(config);
enum { OPT_VERSION = 1, OPT_QUIET = 2 };
const option kLongOptions[] = {
@@ -1391,6 +1447,7 @@ int ReadFlags(int* argc, char*** argv,
// We want to run N jobs in parallel. For N = 0, INT_MAX
// is close enough to infinite for most sane builds.
config->parallelism = value > 0 ? value : INT_MAX;
+ deferGuessParallelism.needGuess = false;
break;
}
case 'k': {
@@ -1439,6 +1496,7 @@ int ReadFlags(int* argc, char*** argv,
return 0;
case 'h':
default:
+ deferGuessParallelism.Refresh();
Usage(*config);
return 1;
}
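
DeferGuessParallelism postpones the potentially slow GuessParallelism() call until flag parsing finishes, and skips it entirely once -j supplies an explicit value; Refresh() exists so the usage/help path still reports a concrete default. A self-contained sketch of the same deferral pattern, with GuessValue standing in for GuessParallelism():

#include <cstdio>

struct Config { int parallelism = 0; };

static int GuessValue() {          // stand-in for GuessParallelism()
  puts("guessing default...");
  return 8;
}

class DeferGuess {
 public:
  bool needGuess = true;
  Config* config;
  explicit DeferGuess(Config* c) : config(c) {}
  void Refresh() {
    if (needGuess) {
      needGuess = false;
      config->parallelism = GuessValue();
    }
  }
  ~DeferGuess() { Refresh(); }
};

int main() {
  Config config;
  {
    DeferGuess defer(&config);
    config.parallelism = 4;     // as if "-j 4" was parsed
    defer.needGuess = false;    // so the destructor leaves it alone
  }
  printf("parallelism = %d\n", config.parallelism);  // 4, and no "guessing"
}
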
diff --git a/src/parser.cc b/src/parser.cc
index 756922d..5f303c5 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -31,13 +31,6 @@ bool Parser::Load(const string& filename, string* err, Lexer* parent) {
return false;
}
- // The lexer needs a nul byte at the end of its input, to know when it's done.
- // It takes a StringPiece, and StringPiece's string constructor uses
- // string::data(). data()'s return value isn't guaranteed to be
- // null-terminated (although in practice - libc++, libstdc++, msvc's stl --
- // it is, and C++11 demands that too), so add an explicit nul byte.
- contents.resize(contents.size() + 1);
-
return Parse(filename, contents, err);
}
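
The explicit trailing nul is dropped because, as the deleted comment itself notes, C++11 guarantees that std::string's buffer is null-terminated, so the lexer's end-of-input sentinel is already there and the extra resize (and possible reallocation of the whole manifest) is unnecessary. A tiny check of that guarantee:

#include <cassert>
#include <string>

int main() {
  std::string contents = "rule cc\n  command = gcc -c $in -o $out\n";
  // Since C++11, data() and c_str() refer to the same buffer and
  // contents.data()[contents.size()] is a readable '\0'.
  assert(contents.data()[contents.size()] == '\0');
}
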
diff --git a/src/status.h b/src/status.h
index e211ba3..b2e50ea 100644
--- a/src/status.h
+++ b/src/status.h
@@ -92,14 +92,14 @@ struct StatusPrinter : Status {
double rate() { return rate_; }
- void UpdateRate(int update_hint, int64_t time_millis_) {
+ void UpdateRate(int update_hint, int64_t time_millis) {
if (update_hint == last_update_)
return;
last_update_ = update_hint;
if (times_.size() == N)
times_.pop();
- times_.push(time_millis_);
+ times_.push(time_millis);
if (times_.back() != times_.front())
rate_ = times_.size() / ((times_.back() - times_.front()) / 1e3);
}
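
Renaming the parameter from time_millis_ to time_millis removes the member-style trailing underscore from a local name; the rate itself is a sliding window over the last N edge-finish times. A worked example of the formula with made-up timestamps:

#include <cstdio>
#include <initializer_list>
#include <queue>

int main() {
  // Five edges finished at these times (milliseconds since build start).
  std::queue<double> times;
  for (double t : { 1000.0, 1500.0, 2200.0, 3000.0, 3500.0 })
    times.push(t);
  // The window spans 2.5 s, so the rate is 5 / 2.5 = 2 edges per second.
  double rate = times.size() / ((times.back() - times.front()) / 1e3);
  printf("%.1f edges/s\n", rate);  // 2.0
}
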
diff --git a/src/util.cc b/src/util.cc
index 080883e..eefa3f5 100644
--- a/src/util.cc
+++ b/src/util.cc
@@ -49,6 +49,9 @@
#include <libperfstat.h>
#elif defined(linux) || defined(__GLIBC__)
#include <sys/sysinfo.h>
+#include <fstream>
+#include <map>
+#include "string_piece_util.h"
#endif
#if defined(__FreeBSD__)
@@ -350,7 +353,8 @@ int ReadFile(const string& path, string* contents, string* err) {
if (!::ReadFile(f, buf, sizeof(buf), &len, NULL)) {
err->assign(GetLastErrorString());
contents->clear();
- return -1;
+ ::CloseHandle(f);
+ return -EIO;
}
if (len == 0)
break;
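
Two fixes in this hunk: the open handle is now closed on the read-error path (previously it leaked), and the function returns -EIO so callers see an errno-style code, as on the POSIX side. A Win32-only sketch of the corrected shape, illustrative rather than ninja's full ReadFile:

#ifdef _WIN32
#include <windows.h>
#include <cerrno>
#include <string>

int ReadAll(const std::string& path, std::string* contents) {
  HANDLE f = ::CreateFileA(path.c_str(), GENERIC_READ, FILE_SHARE_READ, NULL,
                           OPEN_EXISTING, FILE_FLAG_SEQUENTIAL_SCAN, NULL);
  if (f == INVALID_HANDLE_VALUE)
    return -ENOENT;
  for (;;) {
    char buf[64 << 10];
    DWORD len;
    if (!::ReadFile(f, buf, sizeof(buf), &len, NULL)) {
      ::CloseHandle(f);            // the leak this patch plugs
      return -EIO;
    }
    if (len == 0)
      break;
    contents->append(buf, len);
  }
  ::CloseHandle(f);
  return 0;
}
#endif
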
@@ -365,8 +369,13 @@ int ReadFile(const string& path, string* contents, string* err) {
return -errno;
}
+#ifdef __USE_LARGEFILE64
+ struct stat64 st;
+ if (fstat64(fileno(f), &st) < 0) {
+#else
struct stat st;
if (fstat(fileno(f), &st) < 0) {
+#endif
err->assign(strerror(errno));
fclose(f);
return -errno;
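
When glibc defines __USE_LARGEFILE64, the stat64/fstat64 pair gives a 64-bit st_size, so files larger than 2 GiB report a correct size even on 32-bit builds. The same conditional shape in isolation (a sketch, not ninja's code):

#include <cstdio>
#include <sys/stat.h>

// Returns the file size in bytes, or -1 on error.
long long FileSize(FILE* f) {
#ifdef __USE_LARGEFILE64
  struct stat64 st;
  if (fstat64(fileno(f), &st) < 0)
    return -1;
#else
  struct stat st;
  if (fstat(fileno(f), &st) < 0)
    return -1;
#endif
  return (long long)st.st_size;
}
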
@@ -498,8 +507,160 @@ string StripAnsiEscapeCodes(const string& in) {
return stripped;
}
+#if defined(linux) || defined(__GLIBC__)
+std::pair<int64_t, bool> readCount(const std::string& path) {
+ std::ifstream file(path.c_str());
+ if (!file.is_open())
+ return std::make_pair(0, false);
+ int64_t n = 0;
+ file >> n;
+ if (file.good())
+ return std::make_pair(n, true);
+ return std::make_pair(0, false);
+}
+
+struct MountPoint {
+ int mountId;
+ int parentId;
+ StringPiece deviceId;
+ StringPiece root;
+ StringPiece mountPoint;
+ vector<StringPiece> options;
+ vector<StringPiece> optionalFields;
+ StringPiece fsType;
+ StringPiece mountSource;
+ vector<StringPiece> superOptions;
+ bool parse(const string& line) {
+ vector<StringPiece> pieces = SplitStringPiece(line, ' ');
+ if (pieces.size() < 10)
+ return false;
+ size_t optionalStart = 0;
+ for (size_t i = 6; i < pieces.size(); i++) {
+ if (pieces[i] == "-") {
+ optionalStart = i + 1;
+ break;
+ }
+ }
+ if (optionalStart == 0)
+ return false;
+ if (optionalStart + 3 != pieces.size())
+ return false;
+ mountId = atoi(pieces[0].AsString().c_str());
+ parentId = atoi(pieces[1].AsString().c_str());
+ deviceId = pieces[2];
+ root = pieces[3];
+ mountPoint = pieces[4];
+ options = SplitStringPiece(pieces[5], ',');
+ optionalFields =
+ vector<StringPiece>(&pieces[6], &pieces[optionalStart - 1]);
+ fsType = pieces[optionalStart];
+ mountSource = pieces[optionalStart + 1];
+ superOptions = SplitStringPiece(pieces[optionalStart + 2], ',');
+ return true;
+ }
+ string translate(string& path) const {
+ // path must be sub dir of root
+ if (path.compare(0, root.len_, root.str_, root.len_) != 0) {
+ return string();
+ }
+ path.erase(0, root.len_);
+ if (path == ".." || (path.length() > 2 && path.compare(0, 3, "../") == 0)) {
+ return string();
+ }
+ return mountPoint.AsString() + "/" + path;
+ }
+};
+
+struct CGroupSubSys {
+ int id;
+ string name;
+ vector<string> subsystems;
+ bool parse(string& line) {
+ size_t first = line.find(':');
+ if (first == string::npos)
+ return false;
+ line[first] = '\0';
+ size_t second = line.find(':', first + 1);
+ if (second == string::npos)
+ return false;
+ line[second] = '\0';
+ id = atoi(line.c_str());
+ name = line.substr(second + 1);
+ vector<StringPiece> pieces =
+ SplitStringPiece(StringPiece(line.c_str() + first + 1), ',');
+ for (size_t i = 0; i < pieces.size(); i++) {
+ subsystems.push_back(pieces[i].AsString());
+ }
+ return true;
+ }
+};
+
+map<string, string> ParseMountInfo(map<string, CGroupSubSys>& subsystems) {
+ map<string, string> cgroups;
+ ifstream mountinfo("/proc/self/mountinfo");
+ if (!mountinfo.is_open())
+ return cgroups;
+ while (!mountinfo.eof()) {
+ string line;
+ getline(mountinfo, line);
+ MountPoint mp;
+ if (!mp.parse(line))
+ continue;
+ if (mp.fsType != "cgroup")
+ continue;
+ for (size_t i = 0; i < mp.superOptions.size(); i++) {
+ string opt = mp.superOptions[i].AsString();
+ map<string, CGroupSubSys>::iterator subsys = subsystems.find(opt);
+ if (subsys == subsystems.end())
+ continue;
+ string newPath = mp.translate(subsys->second.name);
+ if (!newPath.empty())
+ cgroups.insert(make_pair(opt, newPath));
+ }
+ }
+ return cgroups;
+}
+
+map<string, CGroupSubSys> ParseSelfCGroup() {
+ map<string, CGroupSubSys> cgroups;
+ ifstream cgroup("/proc/self/cgroup");
+ if (!cgroup.is_open())
+ return cgroups;
+ string line;
+ while (!cgroup.eof()) {
+ getline(cgroup, line);
+ CGroupSubSys subsys;
+ if (!subsys.parse(line))
+ continue;
+ for (size_t i = 0; i < subsys.subsystems.size(); i++) {
+ cgroups.insert(make_pair(subsys.subsystems[i], subsys));
+ }
+ }
+ return cgroups;
+}
+
+int ParseCPUFromCGroup() {
+ map<string, CGroupSubSys> subsystems = ParseSelfCGroup();
+ map<string, string> cgroups = ParseMountInfo(subsystems);
+ map<string, string>::iterator cpu = cgroups.find("cpu");
+ if (cpu == cgroups.end())
+ return -1;
+ std::pair<int64_t, bool> quota = readCount(cpu->second + "/cpu.cfs_quota_us");
+ if (!quota.second || quota.first == -1)
+ return -1;
+ std::pair<int64_t, bool> period =
+ readCount(cpu->second + "/cpu.cfs_period_us");
+ if (!period.second)
+ return -1;
+ if (period.first == 0)
+ return -1;
+ return quota.first / period.first;
+}
+#endif
+
int GetProcessorCount() {
#ifdef _WIN32
+ DWORD cpuCount = 0;
#ifndef _WIN64
// Need to use GetLogicalProcessorInformationEx to get real core count on
// machines with >64 cores. See https://stackoverflow.com/a/31209344/21475
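
The new Linux-only code discovers the cgroup v1 cpu controller by cross-referencing /proc/self/cgroup with /proc/self/mountinfo, then derives a CPU limit from cpu.cfs_quota_us divided by cpu.cfs_period_us. A worked example of that final arithmetic (values are made up; a quota of -1 means unlimited and yields no limit):

#include <cstdint>
#include <cstdio>

int main() {
  int64_t quota = 150000;   // cpu.cfs_quota_us: 150 ms of CPU time...
  int64_t period = 100000;  // ...per 100 ms period, i.e. 1.5 CPUs
  if (quota == -1 || period == 0)
    puts("no usable cgroup limit");
  else
    printf("cgroup CPU limit: %lld\n", (long long)(quota / period));  // 1
}
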
@@ -524,13 +685,31 @@ int GetProcessorCount() {
i += info->Size;
}
if (cores != 0) {
- return cores;
+ cpuCount = cores;
}
}
}
#endif
- return GetActiveProcessorCount(ALL_PROCESSOR_GROUPS);
+ if (cpuCount == 0) {
+ cpuCount = GetActiveProcessorCount(ALL_PROCESSOR_GROUPS);
+ }
+ JOBOBJECT_CPU_RATE_CONTROL_INFORMATION info;
+ // reference:
+ // https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information
+ if (QueryInformationJobObject(NULL, JobObjectCpuRateControlInformation, &info,
+ sizeof(info), NULL)) {
+ if (info.ControlFlags & (JOB_OBJECT_CPU_RATE_CONTROL_ENABLE |
+ JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP)) {
+ return cpuCount * info.CpuRate / 10000;
+ }
+ }
+ return cpuCount;
#else
+ int cgroupCount = -1;
+ int schedCount = -1;
+#if defined(linux) || defined(__GLIBC__)
+ cgroupCount = ParseCPUFromCGroup();
+#endif
// The number of exposed processors might not represent the actual number of
// processors threads can run on. This happens when a CPU set limitation is
// active, see https://github.com/ninja-build/ninja/issues/1278
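
On Windows the logical-processor count is additionally capped by any hard CPU-rate limit set on the current job object; CpuRate is expressed in hundredths of a percent of total capacity, so 10000 means 100%. A worked example of the scaling:

#include <cstdio>

int main() {
  int cpuCount = 16;          // logical processors across all groups
  unsigned cpuRate = 2500;    // job object hard cap: 25.00% of total CPU
  printf("effective CPUs: %u\n", cpuCount * cpuRate / 10000);  // 4
}
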
@@ -544,10 +723,12 @@ int GetProcessorCount() {
#elif defined(CPU_COUNT)
cpu_set_t set;
if (sched_getaffinity(getpid(), sizeof(set), &set) == 0) {
- return CPU_COUNT(&set);
+ schedCount = CPU_COUNT(&set);
}
#endif
- return sysconf(_SC_NPROCESSORS_ONLN);
+ if (cgroupCount >= 0 && schedCount >= 0) return std::min(cgroupCount, schedCount);
+ if (cgroupCount < 0 && schedCount < 0) return sysconf(_SC_NPROCESSORS_ONLN);
+ return std::max(cgroupCount, schedCount);
#endif
}
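
Finally, the POSIX branch combines the two independent limits: when both the cgroup quota and the sched_getaffinity mask produce a count, the stricter (smaller) one wins; when only one does, it is used; when neither does, the online-processor count is the fallback. The selection logic in isolation:

#include <algorithm>
#include <cstdio>

static int Combine(int cgroupCount, int schedCount, int onlineCount) {
  if (cgroupCount >= 0 && schedCount >= 0)
    return std::min(cgroupCount, schedCount);
  if (cgroupCount < 0 && schedCount < 0)
    return onlineCount;
  return std::max(cgroupCount, schedCount);
}

int main() {
  printf("%d\n", Combine(4, 8, 32));    // both known -> 4
  printf("%d\n", Combine(-1, 8, 32));   // only affinity known -> 8
  printf("%d\n", Combine(-1, -1, 32));  // neither known -> 32
}
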
diff --git a/src/version.cc b/src/version.cc
index 97afa7e..d306957 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -20,7 +20,7 @@
using namespace std;
-const char* kNinjaVersion = "1.10.2.git";
+const char* kNinjaVersion = "1.12.0.git";
void ParseVersion(const string& version, int* major, int* minor) {
size_t end = version.find('.');