Diffstat (limited to 'tools/dev')
-rw-r--r--  tools/dev/aprerr.txt                                     1
-rw-r--r--  tools/dev/benchmarks/RepoPerf/ClearMemory.cpp           55
-rw-r--r--  tools/dev/benchmarks/RepoPerf/TimeWin.cpp              118
-rw-r--r--  tools/dev/benchmarks/RepoPerf/copy_repo.py             313
-rw-r--r--  tools/dev/benchmarks/RepoPerf/win_repo_bench.py        268
-rwxr-xr-x  tools/dev/benchmarks/large_dirs/create_bigdir.sh        10
-rwxr-xr-x  tools/dev/build-svn-deps-win.pl                        919
-rw-r--r--  tools/dev/fsfs-access-map.c                            180
-rw-r--r--  tools/dev/fsfs-reorg.c                                3147
-rw-r--r--  tools/dev/gdb-py/svndbg/printers.py                      8
-rwxr-xr-x  tools/dev/po-merge.py                                   14
-rwxr-xr-x  tools/dev/remove-trailing-whitespace.sh                  8
-rw-r--r--  tools/dev/svnraisetreeconflict/svnraisetreeconflict.c  121
-rwxr-xr-x  tools/dev/trails.py                                     18
-rw-r--r--  tools/dev/unix-build/Makefile.svn                      467
-rw-r--r--  tools/dev/wc-ng/svn-wc-db-tester.c                     269
-rwxr-xr-x  tools/dev/which-error.py                                14
-rw-r--r--  tools/dev/x509-parser.c                                178
18 files changed, 2708 insertions, 3400 deletions
diff --git a/tools/dev/aprerr.txt b/tools/dev/aprerr.txt
index 7b532db..281c424 100644
--- a/tools/dev/aprerr.txt
+++ b/tools/dev/aprerr.txt
@@ -1,3 +1,4 @@
+# This file is used by which-error.py and gen_base.py:write_errno_table()
APR_SUCCESS = 0
SOCBASEERR = 10000
SOCEPERM = 10001
diff --git a/tools/dev/benchmarks/RepoPerf/ClearMemory.cpp b/tools/dev/benchmarks/RepoPerf/ClearMemory.cpp
new file mode 100644
index 0000000..06ef6f5
--- /dev/null
+++ b/tools/dev/benchmarks/RepoPerf/ClearMemory.cpp
@@ -0,0 +1,55 @@
+/* ClearMemory.cpp --- A simple Windows memory cleaning tool
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include "targetver.h"
+
+#include <Windows.h>
+
+#include <new>
+#include <stdio.h>
+#include <tchar.h>
+
+int _tmain(int argc, _TCHAR* argv[])
+{
+ // Get the current memory usage stats
+ MEMORYSTATUSEX statex;
+ statex.dwLength = sizeof (statex);
+ GlobalMemoryStatusEx(&statex);
+
+ // (Clean) cache memory will be listed under "available".
+ // So, allocate all available RAM, touch it and release it again.
+ // Note: plain new would throw std::bad_alloc instead of returning NULL,
+ // so use the nothrow variant to make the check below meaningful.
+ unsigned char *memory = new (std::nothrow) unsigned char[statex.ullAvailPhys];
+ if (memory)
+ {
+ // Make every page dirty.
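+ // (Touching one byte per 4096-byte page, the typical x86 page size,
+ // is enough to commit every page.)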
+ for (DWORDLONG i = 0; i < statex.ullAvailPhys; i += 4096)
+ memory[i]++;
+
+ // Give everything back to the OS.
+ // The in-RAM file read cache is empty now. There may still be bits in
+ // the swap file as well as dirty write buffers. But we don't care
+ // much about these here ...
+ delete[] memory;
+ }
+
+ return 0;
+}
+
diff --git a/tools/dev/benchmarks/RepoPerf/TimeWin.cpp b/tools/dev/benchmarks/RepoPerf/TimeWin.cpp
new file mode 100644
index 0000000..4acab99
--- /dev/null
+++ b/tools/dev/benchmarks/RepoPerf/TimeWin.cpp
@@ -0,0 +1,118 @@
+/* TimeWin.cpp --- A simple Windows tool inspired by Unix' "time".
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include "targetver.h"
+
+#include <Windows.h>
+
+#include <stdio.h>
+#include <tchar.h>
+
+void usage()
+{
+ _tprintf(_T("Execute a command, redirect its stdout to NUL and print\n"));
+ _tprintf(_T("execution times ELAPSED\\tUSER\\tKERNEL in seconds.\n"));
+ _tprintf(_T("\n"));
+ _tprintf(_T("Usage: TimeWin.EXE COMMAND [PARAMETERS]\n"));
+}
+
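+// Return the command line with its first token (the program name) removed.
+// Note: this simple scan assumes the program path contains no spaces;
+// quoted paths with embedded spaces are not handled.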
+LPCTSTR skip_first_arg(LPCTSTR targv)
+{
+ LPCTSTR s = _tcschr(targv, ' ');
+ while (s && *s == ' ')
+ ++s;
+
+ return s;
+}
+
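+// Convert a FILETIME (a count of 100-nanosecond ticks) into seconds.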
+double as_seconds(FILETIME time)
+{
+ return (double)*reinterpret_cast<LONGLONG *>(&time) / 10000000.0;
+}
+
+int _tmain(int argc, LPTSTR argv[])
+{
+ // Minimal CL help support
+ if (argc < 2 || _tcscmp(argv[1], _T("/?")) == 0)
+ {
+ usage();
+ return 0;
+ }
+
+ // Get a file handle for NUL.
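+ // Create it inheritable so the child process can use it as its stdout.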
+ SECURITY_ATTRIBUTES sa;
+ sa.nLength = sizeof(sa);
+ sa.lpSecurityDescriptor = NULL;
+ sa.bInheritHandle = TRUE;
+
+ HANDLE nul = CreateFile(_T("nul"), FILE_APPEND_DATA, FILE_SHARE_WRITE,
+ &sa, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+
+ // Construct a process startup info that uses the same handles as this
+ // one but redirects stdout to NUL.
+ STARTUPINFO startup_info;
+ GetStartupInfo(&startup_info);
+ startup_info.dwFlags |= STARTF_USESTDHANDLES;
+ startup_info.hStdOutput = nul;
+
+ // Execute the command line.
+ PROCESS_INFORMATION process_info;
+ CreateProcess(NULL, _tcsdup(skip_first_arg(GetCommandLine())), NULL, NULL,
+ TRUE, NORMAL_PRIORITY_CLASS, NULL, NULL, &startup_info,
+ &process_info);
+
+ // Get a handle with the needed access rights to the child process.
+ HANDLE child = INVALID_HANDLE_VALUE;
+ DuplicateHandle(GetCurrentProcess(), process_info.hProcess,
+ GetCurrentProcess(), &child,
+ PROCESS_QUERY_INFORMATION | SYNCHRONIZE, FALSE, 0);
+
+ // Wait for the child to finish.
+ // If there was problem earlier (application not found etc.), this will fail.
+ bool success = false;
+ if (WaitForSingleObject(child, INFINITE) == WAIT_OBJECT_0)
+ {
+ // Finally, query the timers and show the result
+ FILETIME start_time, end_time, user_time, kernel_time;
+ if (GetProcessTimes(child, &start_time, &end_time, &kernel_time,
+ &user_time))
+ {
+ _tprintf(_T("%1.3f\t%1.3f\t%1.3f\n"),
+ as_seconds(end_time) - as_seconds(start_time),
+ as_seconds(user_time), as_seconds(kernel_time));
+ success = true;
+ }
+ }
+
+ // In case of failure, give some indication that something went wrong.
+ if (!success)
+ _tprintf(_T("?.???\t?.???f\t?.???\n"),
+
+ // Be good citizens and clean up our mess
+ CloseHandle(child);
+ CloseHandle(process_info.hThread);
+ CloseHandle(process_info.hProcess);
+
+ CloseHandle(nul);
+
+ return 0;
+}
diff --git a/tools/dev/benchmarks/RepoPerf/copy_repo.py b/tools/dev/benchmarks/RepoPerf/copy_repo.py
new file mode 100644
index 0000000..a95a82d
--- /dev/null
+++ b/tools/dev/benchmarks/RepoPerf/copy_repo.py
@@ -0,0 +1,313 @@
+#!/usr/bin/env python
+#
+# copy_repo.py: create multiple, interleaved copies of a set of repositories.
+#
+# Subversion is a tool for revision control.
+# See http://subversion.apache.org for more information.
+#
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+######################################################################
+
+# General modules
+import os
+import random
+import shutil
+import sys
+
+class Separators:
+ """ This class is a container for dummy / filler files.
+ It will be used to create spaces between repository
+ versions on disk, i.e. to simulate some aspect of
+ real-world FS fragmentation.
+
+ It gets initialized with some parent path as well as
+ the desired average file size and will create a new
+ such file with each call to write(). Automatic
+ sharding keeps FS specific overhead at bay. Call
+ cleanup() to eventually delete all dummy files. """
+
+ buffer = "A" * 4096
+ """ Write this non-NULL contents into the dummy files. """
+
+ def __init__(self, path, average_size):
+ """ Initialize and store all dummy files in a '__tmp'
+ sub-folder of PATH. The size of each dummy file
+ is a random value and will be roughly AVERAGE_SIZE
+ kBytes on average. A value of 0 will effectively
+ disable dummy file creation. """
+
+ self.path = os.path.join(path, '__tmp')
+ self.size = average_size
+ self.count = 0
+
+ if os.path.exists(self.path):
+ shutil.rmtree(self.path)
+
+ os.mkdir(self.path)
+
+ def write(self):
+ """ Add a new dummy file """
+
+ # Throw dice of a file size.
+ # Factor 1024 for kBytes, factor 2 for being an average.
+ size = (int)(float(self.size) * random.random() * 2 * 1024.0)
+
+ # Don't create empty files. This also implements the
+ # "average = 0 means no files" rule.
+ if size > 0:
+ self.count += 1
+
+ # Create a new shard for every 1000 files
+ subfolder = os.path.join(self.path, str(self.count / 1000))
+ if not os.path.exists(subfolder):
+ os.mkdir(subfolder)
+
+ # Create and write the file in 4k chunks.
+ # Writing full chunks will result in average file sizes
+ # being slightly above SELF.SIZE. That's good enough
+ # for our purposes.
+ f = open(os.path.join(subfolder, str(self.count)), "wb")
+ while size > 0:
+ f.write(self.buffer)
+ size -= len(self.buffer)
+
+ f.close()
+
+ def cleanup(self):
+ """ Get rid of all the files (and folders) that we created. """
+
+ shutil.rmtree(self.path)
+
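+# Illustrative use of Separators, mirroring copy_repos() below:
+#
+#   separators = Separators(dst, separator_size)
+#   ...                    # copy one revision / packed shard everywhere
+#   separators.write()     # leave a filler file between copy rounds
+#   separators.cleanup()   # remove all filler files at the end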
+class Repository:
+ """ Encapsulates key information of a repository. Is is being
+ used for copy sources only and contains information about
+ its NAME, PATH, SHARD_SIZE, HEAD revision and MIN_UNPACKED_REV. """
+
+ def _read_config(self, filename):
+ """ Read and return all lines from FILENAME.
+ This will be used to read 'format', 'current' etc. . """
+
+ f = open(os.path.join(self.path, 'db', filename), "rb")
+ lines = f.readlines()
+ f.close()
+
+ return lines
+
+ def __init__(self, parent, name):
+ """ Constructor collecting everything we need to know about
+ the repository NAME within PARENT folder. """
+
+ self.name = name
+ self.path = os.path.join(parent, name)
+
+ self.shard_size = int(self._read_config('format')[1].split(' ')[2])
+ self.min_unpacked_rev = int(self._read_config('min-unpacked-rev')[0])
+ self.head = int(self._read_config('current')[0])
+
+ def needs_copy(self, revision):
+ """ Return True if REVISION is a revision in this repository
+ and is "directly copyable", i.e. is either non-packed or
+ the first rev in a packed shard. Everything else is either
+ not a valid rev or already gets / got copied as part of
+ some packed shard. """
+
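+ # Example (hypothetical numbers): with shard_size == 1000 and
+ # min_unpacked_rev == 5000, revision 3000 needs a copy (it is the
+ # first rev of packed shard "3.pack", which is copied as a whole),
+ # revision 3001 does not (it travels with that pack), and every
+ # non-packed revision up to head is copied individually.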
+ if revision > self.head:
+ return False
+ if revision < self.min_unpacked_rev:
+ return revision % self.shard_size == 0
+
+ return True
+
+ @classmethod
+ def is_repository(cls, path):
+ """ Quick check that PATH is (probably) a repository.
+ This is mainly to filter out aux files put next to
+ (not inside) the repositories to copy. """
+
+ format_path = os.path.join(path, 'db', 'format')
+ return os.path.isfile(format_path)
+
+class Multicopy:
+ """ Helper class doing the actual copying. It copies individual
+ revisions and packed shards from the one source repository
+ to multiple copies of it. The copies have the same name
+ as the source repo but with numbers 0 .. N-1 appended to it.
+
+ The copy process is being initiated by the constructor
+ (copies the repo skeleton w/o revision contents). Revision
+ contents is then copied by successive calls to the copy()
+ method. """
+
+ def _init_copy(self, number):
+ """ Called from the constructor, this will copy SELF.SOURCE_REPO
+ into NUMBER new repos below SELF.DEST_BASE but omit everything
+ below db/revs and db/revprops. """
+
+ src = self.source_repo.path
+ dst = self.dest_base + str(number)
+
+ # Copy the repo skeleton w/o revs and revprops
+ shutil.copytree(src, dst, ignore=shutil.ignore_patterns('revs', 'revprops'))
+
+ # Add revs and revprops
+ self.dst_revs.append(os.path.join(dst, 'db', 'revs'))
+ self.dst_revprops.append(os.path.join(dst, 'db', 'revprops'))
+
+ os.mkdir(self.dst_revs[number])
+ os.mkdir(self.dst_revprops[number])
+
+ def _copy_packed_shard(self, shard, number):
+ """ Copy packed shard number SHARD from SELF.SOURCE_REPO to
+ the copy NUMBER below SELF.DEST_BASE. """
+
+ # Shards are simple subtrees
+ src_revs = os.path.join(self.src_revs, str(shard) + '.pack')
+ dst_revs = os.path.join(self.dst_revs[number], str(shard) + '.pack')
+ src_revprops = os.path.join(self.src_revprops, str(shard) + '.pack')
+ dst_revprops = os.path.join(self.dst_revprops[number], str(shard) + '.pack')
+
+ shutil.copytree(src_revs, dst_revs)
+ shutil.copytree(src_revprops, dst_revprops)
+
+ # Special case: revprops of rev 0 are never packed => extra copy
+ if shard == 0:
+ src_revprops = os.path.join(self.src_revprops, '0')
+ dest_revprops = os.path.join(self.dst_revprops[number], '0')
+
+ shutil.copytree(src_revprops, dest_revprops)
+
+ def _copy_single_revision(self, revision, number):
+ """ Copy non-packed REVISION from SELF.SOURCE_REPO to the copy
+ NUMBER below SELF.DEST_BASE. """
+
+ shard = str(revision / self.source_repo.shard_size)
+
+ # Auto-create shard folder
+ if revision % self.source_repo.shard_size == 0:
+ os.mkdir(os.path.join(self.dst_revs[number], shard))
+ os.mkdir(os.path.join(self.dst_revprops[number], shard))
+
+ # Copy the rev file and the revprop file
+ src_rev = os.path.join(self.src_revs, shard, str(revision))
+ dest_rev = os.path.join(self.dst_revs[number], shard, str(revision))
+ src_revprop = os.path.join(self.src_revprops, shard, str(revision))
+ dest_revprop = os.path.join(self.dst_revprops[number], shard, str(revision))
+
+ shutil.copyfile(src_rev, dest_rev)
+ shutil.copyfile(src_revprop, dest_revprop)
+
+ def __init__(self, source, target_parent, count):
+ """ Initiate the copy process for the SOURCE repository to
+ be copied COUNT times into the TARGET_PARENT directory. """
+
+ self.source_repo = source
+ self.dest_base = os.path.join(target_parent, source.name)
+
+ self.src_revs = os.path.join(source.path, 'db', 'revs')
+ self.src_revprops = os.path.join(source.path, 'db', 'revprops')
+
+ self.dst_revs = []
+ self.dst_revprops = []
+ for i in range(0, count):
+ self._init_copy(i)
+
+ def copy(self, revision, number):
+ """ Copy (packed or non-packed) REVISION from SELF.SOURCE_REPO
+ to the copy NUMBER below SELF.DEST_BASE.
+
+ SELF.SOURCE_REPO.needs_copy(REVISION) must be True. """
+
+ if revision < self.source_repo.min_unpacked_rev:
+ self._copy_packed_shard(revision / self.source_repo.shard_size, number)
+ else:
+ self._copy_single_revision(revision, number)
+
+def copy_repos(src, dst, count, separator_size):
+ """ Under DST, create COUNT copies of all repositories immediately
+ below SRC.
+
+ All copies will be "interleaved" such that we copy each individual
+ revision / packed shard to all target repos first before
+ continuing with the next revision / packed shard. After each
+ round (revision / packed shard) insert a temporary file of
+ SEPARATOR_SIZE kBytes on average to add more spacing between
+ revisions. The temp files get automatically removed at the end.
+
+ Please note that this function will clear DST before copying
+ anything into it. """
+
+ # Remove any remnants from the target folder.
+ # (DST gets auto-created by the first repo copy.)
+ shutil.rmtree(dst)
+
+ # Repositories to copy and the respective copy utilities
+ repositories = []
+ copies = []
+
+ # Find repositories, initiate copies and determine the range of
+ # revisions to copy in total
+ max_revision = 0
+ for name in os.listdir(src):
+ if Repository.is_repository(os.path.join(src, name)):
+ repository = Repository(src, name)
+ repositories.append(repository)
+ copies.append(Multicopy(repository, dst, count))
+
+ if repository.head > max_revision:
+ max_revision = repository.head
+
+ # Temp file collection (spacers)
+ separators = Separators(dst, separator_size)
+
+ # Copy all repos in revision,number-major order
+ for revision in xrange(0, max_revision + 1):
+ for number in xrange(0, count):
+
+ any_copy = False
+ for i in xrange(0, len(repositories)):
+ if repositories[i].needs_copy(revision):
+ any_copy = True
+ copies[i].copy(revision, number)
+
+ # Don't add spacers when nothing got copied (REVISION is
+ # packed in all repositories).
+ if any_copy:
+ separators.write()
+
+ # Now that all data is in position, remove the spacers
+ separators.cleanup()
+
+def show_usage():
+ """ Write a simple CL docstring """
+
+ print "Copies and duplicates repositories in a way that mimics larger deployments."
+ print
+ print "Usage:"
+ print "copy_repo.py SRC DST COUNT SEPARATOR_SIZE"
+ print
+ print "SRC Immediate parent folder of all the repositories to copy."
+ print "DST Folder to copy into; current contents will be lost."
+ print "COUNT Number of copies to create of each source repository."
+ print "SEPARATOR_SIZE Additional spacing, in kBytes, between revisions."
+
+# main function
+if len(sys.argv) == 5:
+ copy_repos(sys.argv[1], sys.argv[2], int(sys.argv[3]), int(sys.argv[4]))
+else:
+ show_usage()
diff --git a/tools/dev/benchmarks/RepoPerf/win_repo_bench.py b/tools/dev/benchmarks/RepoPerf/win_repo_bench.py
new file mode 100644
index 0000000..d470a04
--- /dev/null
+++ b/tools/dev/benchmarks/RepoPerf/win_repo_bench.py
@@ -0,0 +1,268 @@
+#!/usr/bin/env python
+#
+# win_repo_bench.py: run repository / server performance tests on Windows.
+#
+# Subversion is a tool for revision control.
+# See http://subversion.apache.org for more information.
+#
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+######################################################################
+
+# General modules
+import os
+import shutil
+import sys
+import subprocess
+import time
+
+from win32com.shell import shell, shellcon
+
+# Adapt these paths to your needs
+
+# Contains all the REPOSITORIES
+repo_parent = "C:\\repos"
+
+# Where to create working copies
+wc_path = "C:\\wc"
+exe_path = "C:\\develop\\Subversion\\trunk\\Release"
+apache_path = "C:\\develop\\Subversion"
+
+# Test these repositories and in this order.
+# Actual repository names have numbers 0 .. REPETITIONS-1 appended to them
+repositories = ["ruby-f6-nonpacked", "ruby-f7-nonpacked",
+ "ruby-f6-packed", "ruby-f7-packed",
+ "bsd-f6-nonpacked", "bsd-f7-nonpacked",
+ "bsd-f6-packed", "bsd-f7-packed"]
+
+# Basically lists the RA backends to test but as long as all repositories
+# can be accessed using any of them, arbitrary URLs are possible.
+prefixes = ["svn://localhost/", "http://localhost/svn/", "file:///C:/repos/"]
+
+# Number of times to repeat the tests. For each iteration, there must be
+# a separate copy of all repositories.
+repetitions = 3
+
+# Server configurations to test
+configurations = ['slow', 'medium', 'fast']
+svnserve_params = {
+ 'slow':"",
+ 'medium':"-M 256" ,
+ 'fast':"-M 1024 -c 0 --cache-revprops yes --block-read yes --client-speed 1000"
+}
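+# (For svnserve, -M sets the in-memory cache size in MB; the 'fast' profile
+# additionally disables compression (-c 0) and enables revprop caching and
+# block-read mode, options available in newer svnserve builds.)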
+
+
+def clear_memory():
+ """ Clear in-RAM portion of the file / disk cache """
+ subprocess.call(["ClearMemory.exe"])
+
+def start_server(prefix, config):
+ """ Depending on the url PREFIX, start the corresponding server with the
+ given CONFIGuration. file: and http: access will actually have been
+ configured by set_config(). """
+
+ if prefix[:4] == "svn:":
+ exe = os.path.join(exe_path, "svnserve.exe")
+ command = "cmd.exe /c start " + exe + " -dr " + repo_parent + \
+ " " + svnserve_params[config]
+ subprocess.call(command)
+ time.sleep(2)
+ elif prefix[:5] == "http:":
+ exe = os.path.join(apache_path, 'bin', 'httpd.exe')
+ subprocess.call(exe + " -k start")
+ time.sleep(2)
+
+def stop_server(prefix):
+ """ Depending on the url PREFIX, stop / kill the corresponding server. """
+
+ if prefix[:4] == "svn:":
+ subprocess.call("cmd.exe /c taskkill /im svnserve.exe /f > nul 2>&1")
+ time.sleep(1)
+ elif prefix[:5] == "http:":
+ exe = os.path.join(apache_path, 'bin', 'httpd.exe')
+ subprocess.call(exe + " -k stop")
+ time.sleep(1)
+
+def run_cs_command(state, config, repository, prefix, args):
+ """ Run the client-side command given in ARGS. Log the STATE of the
+ caches, the CONFIG we are using, the REPOSITORY, the url PREFIX
+ and finally the execution times. """
+
+ # Make sure we can create a new working copy if we want to.
+ if os.path.exists(wc_path):
+ shutil.rmtree(wc_path)
+
+ # Select the client to use.
+ if ('null-export' in args) or ('null-log' in args):
+ exe = os.path.join(exe_path, "svn-bench.exe")
+ else:
+ exe = os.path.join(exe_path, "svn.exe")
+
+ # Display the operation
+ repo_title = repository.replace('nonpacked', 'nopack')
+ print state, "\t", repo_title, "\t", prefix, "\t", config, "\t",
+ sys.stdout.flush()
+
+ # Execute the command and show the execution times
+ subprocess.call(["TimeWin.exe", exe] + args)
+
+
+def run_test_cs_sequence(config, repository, run, prefix, command, args):
+ """ Run the client-side COMMAND with the given ARGS in various stages
+ of cache heat-up. Execute the test with server CONFIG on REPOSITORY
+ with the given url PREFIX. """
+
+ # Build the full URL to use. Exports operate on the main dev line only.
+ url = prefix + repository + str(run)
+ if (command == 'export') or (command == 'null-export'):
+ if repository[:3] == 'bsd':
+ url += '/head'
+ else:
+ url += '/trunk'
+
+ # Full set of command arguments
+ args = [command, url] + args
+
+ # Free up caches best we can.
+ clear_memory()
+
+ # Caches are quite cool now and ready to take up new data
+ start_server(prefix, config)
+ run_cs_command("Cold", config, repository, prefix, args)
+ stop_server(prefix)
+
+ # OS caches are quite hot now.
+ # Run operation from hot OS caches but cold SVN caches.
+ start_server(prefix, config)
+ run_cs_command("WarmOS", config, repository, prefix, args)
+ stop_server(prefix)
+
+ # OS caches may be even hotter now.
+ # Run operation from hot OS caches but cold SVN caches.
+ start_server(prefix, config)
+ run_cs_command("HotOS", config, repository, prefix, args)
+
+ # Keep server process and thus the warmed up SVN caches.
+ # Run operation from hot OS and SVN caches.
+ run_cs_command("WrmSVN", config, repository, prefix, args)
+ run_cs_command("HotSVN", config, repository, prefix, args)
+ stop_server(prefix)
+
+
+def set_config(config):
+ """ Switch configuration files to CONFIG. This overwrites the client
+ config file with config.$CONFIG and the server config file with
+ subversion.$CONFIG.conf. """
+
+ appdata = shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, None, 0)
+ svn_config_folder = os.path.join(appdata, 'Subversion')
+ svn_config_file = os.path.join(svn_config_folder, 'config')
+ svn_config_template = svn_config_file + '.' + config
+
+ shutil.copyfile(svn_config_template, svn_config_file)
+
+ apache_config_folder = os.path.join(apache_path, 'conf', 'extra')
+ apache_config_file = os.path.join(apache_config_folder, 'subversion.conf')
+ apache_config_template = os.path.join(apache_config_folder,
+ 'subversion.' + config + '.conf')
+
+ shutil.copyfile(apache_config_template, apache_config_file)
+
+
+def run_test_cs_configurations(command, args):
+ """ Run client COMMAND with basic arguments ARGS in all configurations
+ repeatedly with all servers on all repositories. """
+
+ print
+ print command
+ print
+
+ for config in configurations:
+ set_config(config)
+ for prefix in prefixes:
+ # These two must be the innermost loops and must be in that order.
+ # It gives us the coldest caches and the least temporal favoritism.
+ for run in range(0, repetitions):
+ for repository in repositories:
+ run_test_cs_sequence(config, repository, run, prefix, command, args)
+
+def run_admin_command(state, config, repository, args):
+ """ Run the svnadmin command given in ARGS. Log the STATE of the
+ caches, the CONFIG we are using, the REPOSITORY and finally
+ the execution times. """
+
+ exe = os.path.join(exe_path, "svnadmin.exe")
+
+ if config == 'medium':
+ extra = ['-M', '256']
+ elif config == 'fast':
+ extra = ['-M', '1024']
+ else:
+ extra = []
+
+ print state, "\t", repository, "\t", config, "\t",
+ sys.stdout.flush()
+ subprocess.call(["TimeWin.exe", exe] + args + extra)
+
+def run_test_admin_sequence(config, repository, run, command, args):
+ """ Run the svnadmin COMMAND with the given ARGS in various stages
+ of cache heat-up. Execute the test with server CONFIG on
+ REPOSITORY. """
+
+ # Full set of command arguments
+ path = os.path.join(repo_parent, repository + str(run))
+ args = [command, path] + args
+
+ # Free up caches best we can.
+ clear_memory()
+
+ # svnadmin runs can be quite costly and are usually CPU-bound.
+ # Test with "cold" and "hot" CPU caches only.
+ run_admin_command("Cold", config, repository, args)
+ run_admin_command("Hot", config, repository, args)
+
+
+def run_test_admin_configurations(command, args):
+ """ Run svnadmin COMMAND with basic arguments ARGS in all configurations
+ repeatedly on all repositories. """
+
+ print
+ print command
+ print
+
+ for config in configurations:
+ # These two must be the innermost loops and must be in that order.
+ # It gives us the coldest caches and the least temporal favoritism.
+ for run in range(0, repetitions):
+ for repository in repositories:
+ run_test_admin_sequence(config, repository, run, command, args)
+
+
+def bench():
+ """ Run all performance tests. """
+
+ run_test_cs_configurations('log', ['-v', '--limit', '50000'])
+ run_test_cs_configurations('export', [wc_path, '-q'])
+
+ run_test_cs_configurations('null-log', ['-v', '--limit', '50000', '-q'])
+ run_test_cs_configurations('null-export', ['-q'])
+
+ run_test_admin_configurations('dump', ['-q'])
+
+# main function
+bench()
diff --git a/tools/dev/benchmarks/large_dirs/create_bigdir.sh b/tools/dev/benchmarks/large_dirs/create_bigdir.sh
index a389dcc..c2830c8 100755
--- a/tools/dev/benchmarks/large_dirs/create_bigdir.sh
+++ b/tools/dev/benchmarks/large_dirs/create_bigdir.sh
@@ -29,7 +29,7 @@ SVNPATH="$('pwd')/subversion"
# Comment the SVNSERVE line to use file:// instead of svn://.
SVN=${SVNPATH}/svn/svn
-SVNADMIN=${SVNPATH}/svnadmin/svnadmin
+SVNADMIN=${SVNPATH}/svnadmin/svnadmin
SVNSERVE=${SVNPATH}/svnserve/svnserve
# VALGRIND="valgrind --tool=callgrind"
@@ -45,7 +45,7 @@ REPOROOT=/dev/shm
FILECOUNT=1
MAXCOUNT=20000
-# only 1.7 supports server-side caching and uncompressed data transfer
+# only 1.7 supports server-side caching and uncompressed data transfer
SERVEROPTS="-c 0 -M 400"
@@ -162,7 +162,7 @@ run_svn_get() {
fi
}
-# main loop
+# main loop
while [ $FILECOUNT -lt $MAXCOUNT ]; do
echo "Processing $FILECOUNT files in the same folder"
@@ -172,7 +172,7 @@ while [ $FILECOUNT -lt $MAXCOUNT ]; do
mkdir $WC/$FILECOUNT
for i in 1 $sequence; do
echo "File number $i" > $WC/$FILECOUNT/$i
- done
+ done
printf "\tAdding files ... \t"
run_svn add $FILECOUNT -q
@@ -182,7 +182,7 @@ while [ $FILECOUNT -lt $MAXCOUNT ]; do
printf "\tCommit files ... \t"
run_svn_ci $FILECOUNT add
-
+
printf "\tListing files ... \t"
run_svn ls $FILECOUNT
diff --git a/tools/dev/build-svn-deps-win.pl b/tools/dev/build-svn-deps-win.pl
new file mode 100755
index 0000000..d936369
--- /dev/null
+++ b/tools/dev/build-svn-deps-win.pl
@@ -0,0 +1,919 @@
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+#
+# Script to build all the dependencies for Subversion on Windows
+# It's been written for Windows 8 and Visual Studio 2012, but
+# it's entirely possible it will work with older versions of both.
+
+# The goal here is not to necessarily have everyone using this script.
+# But rather to be able to produce binary packages of the dependencies
+# already built to allow developers to be able to download or checkout
+# Subversion and quickly get up a development environment.
+
+# Prerequisites:
+# Perl: http://www.activestate.com/activeperl/downloads
+# Python: http://www.activestate.com/activepython/downloads
+# 7-Zip: http://www.7-zip.org/download.html
+# CMake: http://www.cmake.org/cmake/resources/software.html
+# Microsoft Visual Studio 2012 (Ultimate has been tested, Express does not work)
+#
+# You probably want these on your PATH. The installers usually
+# offer an option to do that for you so if you can let them.
+#
+# You are expected to run this script within the correct Visual Studio
+# Shell. Probably "VS2012 x86 Native Tools Command Prompt". This
+# sets the proper PATH arguments so that the compiler tools are
+# available.
+#
+# TODO:
+#   Find some way to work around the lack of devenv in Express (msbuild will help some)
+#   Include a package target that zips everything up.
+#   Perl script that runs the Subversion gen-make.py tool with the right args.
+#   Alternatively update gen-make.py with an arg that knows about our layout.
+#   Make the Windows build not expect to go looking into source code (httpd/zlib)
+#   Add SWIG (to support checkout builds where SWIG generation hasn't been done).
+#   Usage/help output from the usual flags/on error input.
+#   Make SQLITE_VER friendly since we're using no dots right now.
+#   Work out the fixes to the projects' sources and contribute them back.
+#   Allow selection of Arch (x86 and x64)
+#   ZLib support for OpenSSL (have to patch openssl)
+#     Use CMake zlib build instead.
+#   Assembler support for OpenSSL.
+#   Add more specific commands to the command line (e.g. build-httpd)
+
+###################################
+###### V A R I A B L E S ######
+###################################
+package Vars;
+# variables in the Vars package can be overridden from the command
+# line with the FOO=BAR syntax. If you want any defaults to reference
+# other variables the defaults need to be in set_defaults() below to
+# allow the defaults to be set after processing user set variables.
+
+# Paths to commands to use, provide full paths if it's not
+# on your PATH already.
+our $SEVEN_ZIP = 'C:\Program Files\7-Zip\7z.exe';
+our $CMAKE = 'cmake';
+our $NMAKE = 'nmake';
+# Use the .com version so we get output; the .exe doesn't produce any output
+our $DEVENV = 'devenv.com';
+our $VCUPGRADE = 'vcupgrade';
+our $PYTHON = 'python';
+
+# Versions of the dependencies we will use
+# Change these if you want but these are known to work with
+# this script as is.
+our $HTTPD_VER = '2.4.4';
+our $APR_VER = '1.4.6';
+our $APU_VER = '1.5.2'; # apr-util version
+our $API_VER = '1.2.1'; # apr-iconv version
+our $ZLIB_VER = '1.2.8';
+our $OPENSSL_VER = '1.0.1e';
+our $PCRE_VER = '8.35';
+our $BDB_VER = '5.3.21';
+our $SQLITE_VER = '3071602';
+our $SERF_VER = '1.3.6';
+our $NEON_VER = '0.29.6';
+
+# Sources for files to download
+our $AWK_URL = 'http://www.cs.princeton.edu/~bwk/btl.mirror/awk95.exe';
+our $HTTPD_URL;
+our $APR_URL;
+our $APU_URL;
+our $API_URL;
+our $ZLIB_URL;
+our $OPENSSL_URL;
+our $PCRE_URL;
+our $BDB_URL;
+our $SQLITE_URL;
+our $SERF_URL;
+our $NEON_URL;
+our $PROJREF_URL = 'https://downloads.redhoundsoftware.com/blog/ProjRef.py';
+
+# Location of the already downloaded file.
+# by default these are undefined and set by the downloader.
+# However, they can be overridden from the command line and then
+# the downloader is skipped. Note that BDB has no downloader
+# so it must be overriden from the command line.
+our $AWK_FILE;
+our $HTTPD_FILE;
+our $APR_FILE;
+our $APU_FILE;
+our $API_FILE;
+our $ZLIB_FILE;
+our $OPENSSL_FILE;
+our $PCRE_FILE;
+our $BDB_FILE;
+our $SQLITE_FILE;
+our $SERF_FILE;
+our $NEON_FILE;
+our $PROJREF_FILE;
+
+# Various directories we use
+our $TOPDIR = Cwd::cwd(); # top of our tree
+our $INSTDIR; # where we install to
+our $BLDDIR; # directory where we actually build
+our $SRCDIR; # directory where we store package files
+
+# Some other options
+our $VS_VER;
+our $NEON;
+our $SVN_VER = '1.9.x';
+our $DEBUG = 0;
+
+# Utility function to remove dots from a string
+sub remove_dots {
+ my $in = shift;
+
+ $in =~ tr/.//d;
+ return $in;
+}
+
+# unless the variable is already defined set the value
+sub set_default {
+ my $var = shift;
+ my $value = shift;
+
+ unless (defined($$var)) {
+ $$var = $value;
+ }
+}
+
+sub set_svn_ver_defaults {
+ my ($svn_major, $svn_minor, $svn_patch) = $SVN_VER =~ /^(\d+)\.(\d+)\.(.+)$/;
+
+ if ($svn_major > 1 or ($svn_major == 1 and $svn_minor >= 8)) {
+ $NEON=0 unless defined($NEON);
+ } else {
+ $NEON=1 unless defined($NEON);
+ }
+}
+
+# Any variables with defaults that reference other values
+# should be set here. This defers setting of the default until runtime in these cases.
+sub set_defaults {
+ set_default(\$HTTPD_URL, "http://archive.apache.org/dist/httpd/httpd-$HTTPD_VER.tar.bz2");
+ set_default(\$APR_URL, "http://archive.apache.org/dist/apr/apr-$APR_VER.tar.bz2");
+ set_default(\$APU_URL, "http://archive.apache.org/dist/apr/apr-util-$APU_VER.tar.bz2");
+ set_default(\$API_URL, "http://archive.apache.org/dist/apr/apr-iconv-$API_VER.tar.bz2");
+ set_default(\$ZLIB_URL, "http://sourceforge.net/projects/libpng/files/zlib/$ZLIB_VER/zlib" . remove_dots($ZLIB_VER) . '.zip');
+ set_default(\$OPENSSL_URL, "http://www.openssl.org/source/openssl-$OPENSSL_VER.tar.gz");
+ set_default(\$PCRE_URL, "ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-$PCRE_VER.zip");
+ set_default(\$BDB_URL, "http://download.oracle.com/berkeley-db/db-$BDB_VER.zip");
+ set_default(\$SQLITE_URL, "http://www.sqlite.org/2013/sqlite-amalgamation-$SQLITE_VER.zip");
+ set_default(\$SERF_URL, "https://archive.apache.org/dist/serf/serf-$SERF_VER.zip");
+ set_default(\$NEON_URL, "http://www.webdav.org/neon/neon-$NEON_VER.tar.gz");
+ set_default(\$INSTDIR, $TOPDIR);
+ set_default(\$BLDDIR, "$TOPDIR\\build");
+ set_default(\$SRCDIR, "$TOPDIR\\sources");
+ set_svn_ver_defaults();
+}
+
+#################################
+###### M A I N ######
+#################################
+# You shouldn't have any reason to modify below this unless you've changed
+# versions of something.
+package main;
+
+use warnings;
+use strict;
+
+use LWP::Simple;
+use File::Path;
+use File::Copy;
+use File::Basename;
+use File::Find;
+use Cwd;
+use Config;
+
+# Full path to perl, this shouldn't need to be messed with
+my $PERL = $Config{perlpath};
+
+# Directory constants that we setup for convenience, but that
+# shouldn't be changed since they are assumed in the build systems
+# of the various dependencies.
+my $HTTPD; # Where httpd gets built
+my $BDB; # Where bdb gets built
+my $BINDIR; # where binaries are installed
+my $LIBDIR; # where libraries are installed
+my $INCDIR; # where headers are installed
+my $SRCLIB; # httpd's srclib dir
+
+# defer setting these values till runtime so users can override the
+# user controlled vars they derive from.
+sub set_paths {
+ $HTTPD = "$BLDDIR\\httpd";
+ $BDB = "$BLDDIR\\bdb";
+ $BINDIR = "$INSTDIR\\bin";
+ $LIBDIR = "$INSTDIR\\lib";
+ $INCDIR = "$INSTDIR\\include";
+ $SRCLIB = "$HTTPD\\srclib";
+ # Add bin to PATH; this will be needed for at least awk later on
+ $ENV{PATH} = "$BINDIR;$ENV{PATH}";
+ # Setup LIB and INCLUDE so we can find BDB
+ $ENV{LIB} = "$LIBDIR;$ENV{LIB}";
+ $ENV{INCLUDE} = "$INCDIR;$ENV{INCLUDE}";
+}
+
+#####################
+# UTILITY FUNCTIONS #
+#####################
+
+# copy a file with error handling
+sub copy_or_die {
+ my $src = shift;
+ my $dest = shift;
+
+ copy($src, $dest) or die "Failed to copy $src to $dest: $!";
+}
+
+# Rename a file and deal with errors.
+sub rename_or_die {
+ my $src = shift;
+ my $dest = shift;
+
+ rename($src, $dest) or die "Failed to rename $src to $dest: $!";
+}
+
+# Utility function to chdir with error handling.
+sub chdir_or_die {
+ my $dir = shift;
+
+ chdir($dir) or die "Failed to chdir to $dir: $!";
+}
+
+# Utility function to call system with error handling.
+# First arg is an error message to print if something fails.
+# Remaining args are passed to system.
+sub system_or_die {
+ my $error_msg = shift;
+ unless (system(@_) == 0) {
+ if (defined($error_msg)) {
+ die "$error_msg (exit code: $?)";
+ } else {
+ die "Failed while running '@_' (exit code: $?)";
+ }
+ }
+}
+
+# Like 'perl -pi.orig': the second arg is a reference to a
+# function that does whatever line processing you want.
+# Note that $_ is used for the input and output of the
+# function. So modifying $_ changes the line in the file.
+# bak can be passed to set the backup extension. If the
+# backup file already exists, shortcut this step.
+sub modify_file_in_place {
+ my $file = shift;
+ my $func = shift;
+ my $bak = shift;
+
+ unless (defined($bak)) {
+ $bak = '.orig';
+ }
+
+ my $backup = $file . $bak;
+ return if -e $backup;
+ rename_or_die($file, $backup);
+ open(IN, "<$backup") or die "Failed to open $backup: $!";
+ open(OUT, ">$file") or die "Failed to open $file: $!";
+ while (<IN>) {
+ &{$func}();
+ print OUT;
+ }
+ close(IN);
+ close(OUT);
+}
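+# Illustrative call (hypothetical file): replace "foo" with "bar" on every
+# line of Makefile.win, keeping the original as Makefile.win.orig:
+#
+#   modify_file_in_place('Makefile.win', sub { s/foo/bar/g; });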
+
+sub check_vs_ver {
+ return if defined($VS_VER);
+
+ # using the vcupgrade command here because it has a consistent name and version
+ # numbering across versions including express versions.
+ my $help_output = `"$VCUPGRADE" /?`;
+ my ($major_version) = $help_output =~ /Version (\d+)\./s;
+
+ if (defined($major_version)) {
+ if ($major_version eq '12') {
+ $VS_VER = '2013';
+ return;
+ } elsif ($major_version eq '11') {
+ $VS_VER = '2012';
+ return;
+ } elsif ($major_version eq '10') {
+ $VS_VER = '2010';
+ return;
+ }
+ }
+
+ die("Visual Studio Version Not Supported");
+}
+
+##################
+# TREE STRUCTURE #
+##################
+
+# Create directories that this script directly needs
+sub prepare_structure {
+ # ignore errors; the directories may already exist.
+ mkdir($BINDIR);
+ mkdir($SRCDIR);
+ mkdir($BLDDIR);
+ mkdir($LIBDIR);
+ mkdir($INCDIR);
+}
+
+# Remove paths created by this script (directly or indirectly).
+# If the first arg is 1 it'll remove the downloaded files otherwise it
+# leaves them alone.
+sub clean_structure {
+ # ignore errors in this function; the paths may not exist
+ my $real_clean = shift;
+
+ if ($real_clean) {
+ rmtree($SRCDIR);
+ }
+ rmtree($BINDIR);
+ rmtree($BLDDIR);
+ rmtree($INCDIR);
+ rmtree($LIBDIR);
+ rmtree("$INSTDIR\\serf");
+ rmtree("$INSTDIR\\neon");
+ rmtree("$INSTDIR\\sqlite-amalgamation");
+
+ # Dirs created indirectly by the install targets
+ rmtree("$INSTDIR\\man");
+ rmtree("$INSTDIR\\share");
+ rmtree("$INSTDIR\\ssl");
+ rmtree("$INSTDIR\\cgi-bin");
+ rmtree("$INSTDIR\\conf");
+ rmtree("$INSTDIR\\error");
+ rmtree("$INSTDIR\\htdocs");
+ rmtree("$INSTDIR\\icons");
+ rmtree("$INSTDIR\\logs");
+ rmtree("$INSTDIR\\manual");
+ rmtree("$INSTDIR\\modules");
+ unlink("$INSTDIR\\ABOUT_APACHE.txt");
+ unlink("$INSTDIR\\CHANGES.txt");
+ unlink("$INSTDIR\\INSTALL.txt");
+ unlink("$INSTDIR\\LICENSE.txt");
+ unlink("$INSTDIR\\NOTICE.txt");
+ unlink("$INSTDIR\\OPENSSL-NEWS.txt");
+ unlink("$INSTDIR\\OPENSSL-README.txt");
+ unlink("$INSTDIR\\README.txt");
+}
+
+############
+# DOWNLOAD #
+############
+
+# Download a url into a file. If successful, put the destination into the
+# variable referenced by $dest_ref.
+sub download_file {
+ my $url = shift;
+ my $file = shift;
+ my $dest_ref = shift;
+
+ # If the variable referenced by $dest_ref is already set, skip downloading;
+ # it means we've been asked to use an already downloaded file.
+ return if (defined($$dest_ref));
+
+ print "Downloading $url\n";
+ # Using mirror() here so that repeated runs shouldn't try to keep downloading
+ # the file.
+ my $response = mirror($url, $file);
+ if (is_error($response)) {
+ die "Couldn't save $url to $file received $response";
+ }
+ $$dest_ref = $file;
+}
+
+# Download all the dependencies we need
+sub download_dependencies {
+ # putting awk in sources is a bit of a hack but it lets us
+ # avoid having to figure out what to delete when cleaning bin
+ download_file($AWK_URL, "$SRCDIR\\awk.exe", \$AWK_FILE);
+ unless(-x "$BINDIR\\awk.exe") { # skip the copy if it exists
+ copy_or_die($AWK_FILE, "$BINDIR\\awk.exe");
+ }
+ download_file($PROJREF_URL, "$SRCDIR\\ProjRef.py", \$PROJREF_FILE);
+ unless(-x "$BINDIR\\ProjRef.py") { # skip the copy if it exists
+ copy_or_die($PROJREF_FILE, $BINDIR);
+ }
+ download_file($BDB_URL, "$SRCDIR\\db.zip", \$BDB_FILE);
+ download_file($ZLIB_URL, "$SRCDIR\\zlib.zip", \$ZLIB_FILE);
+ download_file($OPENSSL_URL, "$SRCDIR\\openssl.tar.gz", \$OPENSSL_FILE);
+ download_file($HTTPD_URL, "$SRCDIR\\httpd.tar.bz2", \$HTTPD_FILE);
+ download_file($APR_URL, "$SRCDIR\\apr.tar.bz2", \$APR_FILE);
+ download_file($APU_URL, "$SRCDIR\\apr-util.tar.bz2", \$APU_FILE);
+ download_file($API_URL, "$SRCDIR\\apr-iconv.tar.bz2", \$API_FILE);
+ download_file($PCRE_URL, "$SRCDIR\\pcre.zip", \$PCRE_FILE);
+ download_file($SQLITE_URL, "$SRCDIR\\sqlite-amalgamation.zip", \$SQLITE_FILE);
+ download_file($SERF_URL, "$SRCDIR\\serf.zip", \$SERF_FILE);
+ download_file($NEON_URL, "$SRCDIR\\neon.tar.gz", \$NEON_FILE) if $NEON;
+}
+
+##############
+# EXTRACTION #
+##############
+
+# Extract a compressed file with 7-zip into a given directory.
+# Skip extraction if the rename_to (or, failing that, the expected_name)
+# directory already exists. If rename_to is set, rename the extracted
+# expected_name path to rename_to.
+sub extract_file {
+ my $file = shift;
+ my $container = shift;
+ my $expected_name = shift;
+ my $rename_to = shift;
+
+ if (defined($rename_to)) {
+ return if -d $rename_to;
+ } elsif (defined($expected_name)) {
+ return if -d $expected_name;
+ }
+
+ my $dest_opt = "";
+ if (defined($container)) {
+ $dest_opt = qq(-o"$container" );
+ }
+
+ my $cmd;
+ if ($file =~ /\.tar\.(bz2|gz)$/) {
+ $cmd = qq("$SEVEN_ZIP" x "$file" -so | "$SEVEN_ZIP" x -y -si -ttar $dest_opt);
+ } else {
+ $cmd = qq("$SEVEN_ZIP" x -y $dest_opt $file);
+ }
+
+ system_or_die("Problem extracting $file", $cmd);
+ if (defined($rename_to)) {
+ rename_or_die($expected_name, $rename_to);
+ }
+}
+
+sub extract_dependencies {
+ extract_file($BDB_FILE, $BLDDIR,
+ "$BLDDIR\\db-$BDB_VER", "$BLDDIR\\bdb");
+ extract_file($HTTPD_FILE, $BLDDIR,
+ "$BLDDIR\\httpd-$HTTPD_VER", "$BLDDIR\\httpd");
+ extract_file($APR_FILE, $SRCLIB,
+ "$SRCLIB\\apr-$APR_VER", "$SRCLIB\\apr");
+ extract_file($APU_FILE, $SRCLIB,
+ "$SRCLIB\\apr-util-$APU_VER", "$SRCLIB\\apr-util");
+ extract_file($API_FILE, $SRCLIB,
+ "$SRCLIB\\apr-iconv-$API_VER", "$SRCLIB\\apr-iconv");
+ # We fix the line endings before putting the non-Apache deps in place since it
+ # touches everything under httpd and there's no point in doing other things.
+ httpd_fix_lineends();
+ extract_file($ZLIB_FILE, $SRCLIB,
+ "$SRCLIB\\zlib-$ZLIB_VER", "$SRCLIB\\zlib");
+ extract_file($OPENSSL_FILE, $SRCLIB,
+ "$SRCLIB\\openssl-$OPENSSL_VER", "$SRCLIB\\openssl");
+ extract_file($PCRE_FILE, $SRCLIB,
+ "$SRCLIB\\pcre-$PCRE_VER", "$SRCLIB\\pcre");
+ extract_file($SQLITE_FILE, $INSTDIR,
+ "$INSTDIR\\sqlite-amalgamation-$SQLITE_VER",
+ "$INSTDIR\\sqlite-amalgamation");
+ extract_file($SERF_FILE, $INSTDIR,
+ "$INSTDIR\\serf-$SERF_VER", "$INSTDIR\\serf");
+ extract_file($NEON_FILE, $INSTDIR,
+ "$INSTDIR\\neon-$NEON_VER", "$INSTDIR\\neon") if $NEON;
+}
+
+#########
+# BUILD #
+#########
+
+sub build_pcre {
+ chdir_or_die("$SRCLIB\\pcre");
+ my $pcre_generator = 'NMake Makefiles';
+ # Have to use RelWithDebInfo since httpd looks for the pdb files
+ my $pcre_build_type = '-DCMAKE_BUILD_TYPE:STRING=' . ($DEBUG ? 'Debug' : 'RelWithDebInfo');
+ my $pcre_options = '-DPCRE_NO_RECURSE:BOOL=ON';
+ my $pcre_shared_libs = '-DBUILD_SHARED_LIBS:BOOL=ON';
+ my $pcre_install_prefix = "-DCMAKE_INSTALL_PREFIX:PATH=$INSTDIR";
+ my $cmake_cmd = qq("$CMAKE" -G "$pcre_generator" "$pcre_build_type" "$pcre_shared_libs" "$pcre_install_prefix" "$pcre_options" .);
+ system_or_die("Failure generating pcre Makefiles", $cmake_cmd);
+ system_or_die("Failure building pcre", qq("$NMAKE"));
+ system_or_die("Failure testing pcre", qq("$NMAKE" test));
+ system_or_die("Failure installing pcre", qq("$NMAKE" install));
+ chdir_or_die($TOPDIR);
+}
+
+# This is based roughly off the build_zlib.bat that the Subversion Windows
+# build generates; if it doesn't match that, then Subversion will fail to build.
+sub build_zlib {
+ chdir_or_die("$SRCLIB\\zlib");
+ $ENV{CC_OPTS} = $DEBUG ? '/MDd /Gm /ZI /Od /GZ /D_DEBUG' : '/MD /O2 /Zi';
+ $ENV{COMMON_CC_OPTS} = '/nologo /W3 /DWIN32 /D_WINDOWS';
+
+ system_or_die("Failure building zilb", qq("$NMAKE" /nologo -f win32\\Makefile.msc STATICLIB=zlibstat.lib all));
+
+ delete $ENV{CC_OPTS};
+ delete $ENV{COMMON_CC_OPTS};
+
+ chdir_or_die($TOPDIR);
+}
+
+sub build_openssl {
+ chdir_or_die("$SRCLIB\\openssl");
+
+ # We're building openssl without an assembler. If someone wants to
+ # use this for production they should probably download NASM and
+ # remove the no-asm below and use ms\do_nasm.bat instead.
+
+ # TODO: Enable openssl to use zlib. openssl needs some patching to do
+ # this since it wants to look for zlib as zlib1.dll and as the httpd
+ # build instructions note you probably don't want to dynamic link zlib.
+
+ # TODO: OpenSSL requires perl on the path since it uses perl without a full
+ # path in the batch file and the makefiles. Probably should determine
+ # if PERL is on the path and add it here if not.
+
+ # The apache build docs suggest no-rc5 no-idea enable-mdc2 on top of what
+ # is used below, the primary driver behind that is patents, but I believe
+ # the rc5 and idea patents have expired.
+ my $platform = $DEBUG ? 'debug-VC-WIN32' : 'VC-WIN32';
+ system_or_die("Failure configuring openssl",
+ qq("$PERL" Configure no-asm "--prefix=$INSTDIR" $platform));
+ system_or_die("Failure building openssl (bat)", 'ms\do_ms.bat');
+ system_or_die("Failure building openssl (nmake)", qq("$NMAKE" /f ms\\ntdll.mak));
+ system_or_die("Failure testing openssl", qq("$NMAKE" /f ms\\ntdll.mak test));
+ system_or_die("Failure installing openssl",
+ qq("$NMAKE" /f ms\\ntdll.mak install));
+ chdir_or_die($TOPDIR);
+}
+
+# Run devenv /Upgrade on file.
+# If the file isn't a .sln file and its matching .sln file is non-empty,
+# shortcut the upgrade. If the file isn't a .sln file, touch the matching
+# basename.sln first so Visual Studio doesn't whine about its backup step.
+sub upgrade_solution {
+ my $file = shift;
+ my $interactive = shift;
+ my $flags = "";
+
+ my ($basename, $directories) = fileparse($file, qr/\.[^.]*$/);
+ my $sln = $directories . $basename . '.sln';
+ return if $file ne $sln and -s $sln; # shortcut if sln file is unique and isn't empty
+ # 'touch' the sln file so that Visual Studio 2012
+ # doesn't try to say there was an error while upgrading because
+ # it was unable to backup the original solution file.
+ unless (-e $sln) {
+ open(SLN, ">$sln") or die "Can't create $sln: $!";
+ close(SLN);
+ }
+ print "Upgrading $file (this may take a while)\n";
+ $flags = " /Upgrade" unless $interactive;
+ system_or_die("Failure upgrading $file", qq("$DEVENV" "$file"$flags));
+ if ($interactive) {
+ print "Can't do automatic upgrade, doing interactive upgrade\n";
+ print "IDE will load, choose to convert all projects, exit the IDE and\n";
+ print "save the resulting solution file\n\n";
+ print "Press Enter to Continue\n";
+ <>;
+ }
+}
+
+# Run the lineends.pl script
+sub httpd_fix_lineends {
+ chdir_or_die($HTTPD);
+ # This script fixes the lineendings to be CRLF in appropriate files.
+ # If we don't run this script then the DSW Upgrade will fail.
+ system_or_die(undef, qq("$PERL" "$SRCLIB\\apr\\build\\lineends.pl"));
+ chdir_or_die($TOPDIR);
+}
+
+# The httpd makefile in 2.4.4 doesn't know about .vcxproj files and
+# still thinks it's got an older version of Visual Studio because
+# .vcproj files have become .vcxproj.
+sub httpd_fix_makefile {
+ my $file = shift;
+
+ modify_file_in_place($file, sub {
+ s/\.vcproj/.vcxproj/i;
+ # The below fixes the installd target breaking when trying to install
+ # pcre, because the dll is named pcred.dll in a Debug build.
+ s/^(\s*copy srclib\\pcre\\pcre\.\$\(src_dll\)\s+"\$\(inst_dll\)"\s+<\s*\.y\s*)$/!IF EXISTS("srclib\\pcre\\pcre\.\$(src_dll)")\n$1!ENDIF\n!IF EXISTS("srclib\\pcre\\pcred\.\$(src_dll)")\n\tcopy srclib\\pcre\\pcred.\$(src_dll)\t\t\t"\$(inst_dll)" <.y\n!ENDIF\n/;
+ });
+}
+
+# This is a poor mans way of inserting a property group into a
+# vcxproj file. It assumes that the ending Project tag will
+# be the start and end of the line with no whitespace, probably
+# not an entirely valid assumption but it works in this case.
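+# E.g. (hypothetical project file) insert_property_group('foo.vcxproj',
+# '<TargetName>bar</TargetName>') turns the closing tag into:
+#   <PropertyGroup><TargetName>bar</TargetName></PropertyGroup>
+#   </Project>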
+sub insert_property_group {
+ my $file = shift;
+ my $xml = shift;
+ my $bak = shift;
+
+ modify_file_in_place($file, sub {
+ s#(^</Project>$)#<PropertyGroup>$xml</PropertyGroup>\n$1#i;
+ }, $bak);
+}
+
+# Strip pre-compiled headers compile and linker flags from file they follow
+# the form: /Ycfoo.h or /Yufoo.h.
+sub disable_pch {
+ my $file = shift;
+
+ modify_file_in_place($file, sub {
+ s#/Y[cu][^ ]+##;
+ });
+}
+
+# Find the first .exe .dll or .so OutputFile in the project
+# provided by file. There may be macros or paths in the
+# result.
+sub get_output_file {
+ my $file = shift;
+ my $result;
+ local $_; # Don't mess with the $_ from the find callback
+
+ open(IN, "<$file") or die "Couldn't open file $file: $!";
+ while (<IN>) {
+ if (m#<OutputFile>(.*?\.(?:exe|dll|so))</OutputFile>#) {
+ $result = $1;
+ last;
+ }
+ }
+ close(IN);
+ return $result;
+}
+
+# Find the name of the bdb library we've installed in our LIBDIR.
+sub find_bdb_lib {
+ my $result;
+ my $debug = $DEBUG ? 'd' : '';
+ find(sub {
+ if (not defined($result) and /^libdb\d+$debug\.lib$/) {
+ $result = $_;
+ }
+ }, $LIBDIR);
+ return $result;
+}
+
+# Insert the dependency dep into project file.
+# bak can be set to set the backup filename made of the project.
+sub insert_dependency_in_proj {
+ my $file = shift;
+ my $dep = shift;
+ my $bak = shift;
+
+ modify_file_in_place($file, sub {
+ s/(%\(AdditionalDependencies\))/$dep;$1/;
+ }, $bak);
+}
+
+# Do what's needed to enable BDB in the httpd and apr-util builds
+sub httpd_enable_bdb {
+ # Make APU_HAVE_DB be true so the code builds.
+ modify_file_in_place('srclib\apr-util\include\apu.hw', sub {
+ s/(#define\s+APU_HAVE_DB\s+)0/${1}1/;
+ });
+
+ # Fix the linkage, apr_dbm_db is hardcoded to libdb47.lib
+ my $bdb_lib = find_bdb_lib();
+ modify_file_in_place('srclib\apr-util\dbm\apr_dbm_db.vcxproj', sub {
+ s/libdb\d+\.lib/$bdb_lib/g;
+ }, '.bdb');
+
+ # httxt2dbm and htdbm need a BDB dependency and don't have one.
+ insert_dependency_in_proj('support\httxt2dbm.vcxproj', $bdb_lib, '.bdb');
+ insert_dependency_in_proj('support\htdbm.vcxproj', $bdb_lib, '.bdb');
+}
+
+# Apply the same fix as found in r1486937 on httpd 2.4.x branch.
+sub httpd_fix_debug {
+ my ($httpd_major, $httpd_minor, $httpd_patch) = $HTTPD_VER =~ /^(\d+)\.(\d+)\.(.+)$/;
+ return unless ($httpd_major <= 2 && $httpd_minor <= 4 && $httpd_patch < 5);
+
+ modify_file_in_place('libhttpd.dsp', sub {
+ s/^(!MESSAGE "libhttpd - Win32 Debug" \(based on "Win32 \(x86\) Dynamic-Link Library"\))$/$1\n!MESSAGE "libhttpd - Win32 Lexical" (based on "Win32 (x86) Dynamic-Link Library")/;
+ s/^(# Begin Group "headers")$/# Name "libhttpd - Win32 Lexical"\n$1/;
+ }, '.lexical');
+}
+
+sub build_httpd {
+ chdir_or_die($HTTPD);
+
+ my $vs_2013 = $VS_VER eq '2013';
+ my $vs_2012 = $VS_VER eq '2012';
+ my $vs_2010 = $VS_VER eq '2010';
+
+ httpd_fix_debug();
+
+ # I don't think cvtdsp.pl is necessary with Visual Studio 2012
+ # but it shouldn't hurt anything either. Including it allows
+ # for the possibility that this may work for older Visual Studio
+ # versions.
+ system_or_die("Failure converting DSP files",
+ qq("$PERL" srclib\\apr\\build\\cvtdsp.pl -2005));
+
+ upgrade_solution('Apache.dsw', $vs_2010);
+ httpd_enable_bdb();
+ httpd_fix_makefile('Makefile.win');
+
+ # Modules and support projects randomly fail due to an error about the
+ # CL.read.1.tlog file already existing. This is really because of the
+ # intermediate dirs being shared between modules, but for the time being
+ # this works around it.
+ find(sub {
+ if (/\.vcxproj$/) {
+ insert_property_group($_, '<TrackFileAccess>false</TrackFileAccess>')
+ }
+ }, 'modules', 'support');
+
+ if ($vs_2012 or $vs_2013) {
+ # Turn off pre-compiled headers for apr-iconv to avoid:
+ # LNK2011: http://msdn.microsoft.com/en-us/library/3ay26wa2(v=vs.110).aspx
+ disable_pch('srclib\apr-iconv\build\modules.mk.win');
+
+ # ApacheMonitor build fails due to a duplicate manifest, so turn off
+ # GenerateManifest
+ insert_property_group('support\win32\ApacheMonitor.vcxproj',
+ '<GenerateManifest>false</GenerateManifest>',
+ '.dupman');
+
+ # The APR libraries have projects named libapr but produce output named libapr-1
+ # The problem with this is in newer versions of Visual Studio TargetName defaults
+ # to the project name and not the basename of the output. Since the PDB file
+ # is named based on the TargetName the pdb file ends up being named libapr.pdb
+ # instead of libapr-1.pdb. The below call fixes this by explicitly providing
+ # a TargetName definition and shuts up some warnings about this problem as well.
+ # Without this fix the install fails when it tries to copy libapr-1.pdb.
+ # See this thread for details of the changes:
+ # http://social.msdn.microsoft.com/Forums/en-US/vcprerelease/thread/3c03e730-6a0e-4ee4-a0d6-6a5c3ce4343c
+ find(sub {
+ return unless (/\.vcxproj$/);
+ my $output_file = get_output_file($_);
+ return unless (defined($output_file));
+ my ($project_name) = fileparse($_, qr/\.[^.]*$/);
+ my ($old_style_target_name) = fileparse($output_file, qr/\.[^.]*$/);
+ return if ($old_style_target_name eq $project_name);
+ insert_property_group($_,
+ "<TargetName>$old_style_target_name</TargetName>", '.torig');
+ }, "$SRCLIB\\apr", "$SRCLIB\\apr-util", "$SRCLIB\\apr-iconv");
+ } elsif ($vs_2010) {
+ system_or_die("Failed fixing project guid references",
+ qq("$PYTHON" "$BINDIR\\ProjRef.py" -i Apache.sln"));
+ }
+
+ # If you're looking here it's possible that something went
+ # wrong with the httpd build. Debugging it can be a bit of a pain
+ # when using this script. There are log files created in the
+ # Release dirs named with the same basename as the project. E.G.
+ # for support\httxt2dbm.vcxproj you can find the log in
+ # support\Release\httxt2dbm.log. You can also run a similar build
+ # from in the IDE, but you'll need to disable some projects since
+ # they are separately driven by the Makefile.win. Grepping for
+ # '/project' in Makefile.win should tell you which projects. You'll
+ # also need to add the bin, include and lib paths to the appropriate
+ # configurations inside the project since we get them from the environment.
+ # Once all that is done the BuildBin project should be buildable for you to
+ # diagnose the problem.
+ my $target = $DEBUG ? "installd" : "installr";
+ system_or_die("Failed building/installing httpd/apr/apu/api",
+ qq("$NMAKE" /f Makefile.win $target "DBM_LIST=db" "INSTDIR=$INSTDIR"));
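+  # Editor's note: with the defaults this expands to roughly
+  #   nmake /f Makefile.win installr "DBM_LIST=db" "INSTDIR=<install dir>"
+  # (installd instead of installr for DEBUG builds); <install dir> stands
+  # in for whatever $INSTDIR resolves to at runtime.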
+
+ chdir_or_die($TOPDIR);
+}
+
+sub build_bdb {
+ chdir_or_die($BDB);
+
+ print(cwd(),$/);
+ my $sln = 'build_windows\Berkeley_DB_vs2010.sln';
+ upgrade_solution($sln);
+
+ my $platform = $DEBUG ? 'Debug|Win32' : 'Release|Win32';
+
+ # Build the db Project first since the full solution fails due to a broken
+ # dependency with the current version of BDB if we don't.
+ system_or_die("Failed building DBD (Project db)",
+ qq("$DEVENV" "$sln" /Build "$platform" /Project db));
+
+ system_or_die("Failed building DBD",
+ qq("$DEVENV" "$sln" /Build "$platform"));
+
+  # BDB doesn't seem to have its own install routines, so we'll do it ourselves
+ copy_or_die('build_windows\db.h', $INCDIR);
+ find(sub {
+ if (/\.(exe|dll|pdb)$/) {
+ copy_or_die($_, $BINDIR);
+ } elsif (/\.lib$/) {
+ copy_or_die($_, $LIBDIR);
+ }
+ }, 'build_windows\\Win32\\' . ($DEBUG ? 'Debug' : 'Release'));
+
+ chdir_or_die($TOPDIR);
+}
+
+# Right now this doesn't actually build serf but just patches it so that it
+# can build against a debug build of OpenSSL.
+sub build_serf {
+ chdir_or_die("$TOPDIR\\serf");
+
+ modify_file_in_place('serf.mak', sub {
+ s/^(INTDIR = Release)$/$1\nOPENSSL_OUT_SUFFIX =/;
+ s/^(INTDIR = Debug)$/$1\nOPENSSL_OUT_SUFFIX = .dbg/;
+ s/(\$\(OPENSSL_SRC\)\\out32(?:dll)?)/$1\$(OPENSSL_OUT_SUFFIX)/g;
+ }, '.debug');
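+  # After patching, serf.mak should contain (illustrative):
+  #   INTDIR = Debug
+  #   OPENSSL_OUT_SUFFIX = .dbg
+  # and each $(OPENSSL_SRC)\out32dll reference gains the suffix, i.e.
+  # $(OPENSSL_SRC)\out32dll$(OPENSSL_OUT_SUFFIX), so Debug builds pick up
+  # the debug OpenSSL output directory.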
+
+ chdir_or_die($TOPDIR);
+}
+
+sub build_dependencies {
+ build_bdb();
+ build_zlib();
+ build_pcre();
+ build_openssl();
+ build_serf();
+ build_httpd();
+}
+
+###############
+# COMMANDLINE #
+###############
+
+# Implement an interface somewhat similar to the make command line.
+# You can give a list of commands and variable assignments interspersed.
+# Variable assignments are always VAR=VALUE with no spaces (in a single
+# argv entry).
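+# For example (hypothetical invocations; the variable must already exist
+# in the Vars package):
+#   perl build-svn-deps-win.pl prepare download extract
+#   perl build-svn-deps-win.pl VS_VER=2012 all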
+sub main {
+ my @commands;
+ while (my $arg = shift @ARGV) {
+ # Look for variable assignment
+ if (my ($lhs, $rhs) = $arg =~ /([^=]+)=(.*)/) {
+      # A bit of hackery to allow the global values in the
+      # Vars package to be overridden from the command line.
+      # E.g. "CMAKE=C:\CMake\cmake.exe" would replace the
+      # default value with this one.
+ if (exists($Vars::{$lhs})) {
+ ${$Vars::{$lhs}} = $rhs;
+ } else {
+ # Don't allow variables that don't exist already to be touched.
+ die "$lhs is an unknown variable.";
+ }
+ } else {
+ # Not a variable so must be a command
+ push @commands, $arg;
+ }
+ }
+
+ # No commands so add the implicit all command
+ if ($#commands == -1) {
+ push @commands, 'all';
+ }
+
+ # Set defaults and paths that have to be set at runtime since they are based
+ # on other variables.
+ Vars::set_defaults();
+ set_paths();
+
+ # Determine the Visual Studio Version and die if not supported.
+ check_vs_ver();
+
+  # Change directory to our TOPDIR before running any commands;
+  # a variable assignment might have changed it.
+ chdir_or_die($TOPDIR);
+
+ # Run the commands in the order given.
+ foreach my $command (@commands) {
+ if ($command eq 'clean') {
+ clean_structure(0);
+ } elsif ($command eq 'real-clean') {
+ clean_structure(1);
+ } elsif ($command eq 'prepare') {
+ prepare_structure();
+ } elsif ($command eq 'download') {
+ download_dependencies();
+ } elsif ($command eq 'extract') {
+ extract_dependencies();
+ } elsif ($command eq 'all') {
+ prepare_structure();
+ download_dependencies();
+ extract_dependencies();
+ build_dependencies();
+ } else {
+ die "Command '$command' is unknown";
+ }
+ }
+}
+
+main();
diff --git a/tools/dev/fsfs-access-map.c b/tools/dev/fsfs-access-map.c
index 5fbd221..ac65182 100644
--- a/tools/dev/fsfs-access-map.c
+++ b/tools/dev/fsfs-access-map.c
@@ -52,9 +52,15 @@ typedef struct file_stats_t
/* number of lseek calls to clusters not previously read */
apr_int64_t uncached_seek_count;
+  /* number of lseek calls not followed by a read */
+ apr_int64_t unnecessary_seeks;
+
/* number of read() calls */
apr_int64_t read_count;
+ /* number of read() calls that returned 0 bytes */
+ apr_int64_t empty_reads;
+
/* total number of bytes returned by those reads */
apr_int64_t read_size;
@@ -86,12 +92,17 @@ typedef struct handle_info_t
/* bytes read so far in the current series of reads started (default: 0) */
apr_int64_t last_read_size;
+ /* number of read() calls in this series */
+ apr_int64_t read_count;
} handle_info_t;
/* useful typedef */
typedef unsigned char byte;
typedef unsigned short word;
+/* an RGB color */
+typedef byte color_t[3];
+
/* global const char * file name -> *file_info_t map */
static apr_hash_t *files = NULL;
@@ -136,6 +147,11 @@ store_read_info(handle_info_t *handle_info)
++*count;
}
}
+ else if (handle_info->read_count == 0)
+ {
+ /* two consecutive seeks */
+ handle_info->file->unnecessary_seeks++;
+ }
}
 /* Handle an open() call.  Ensures that a file_info_t for the given NAME
@@ -152,18 +168,18 @@ open_file(const char *name, int handle)
if (!file)
{
apr_pool_t *pool = apr_hash_pool_get(files);
- apr_pool_t *sub_pool = svn_pool_create(pool);
+ apr_pool_t *subpool = svn_pool_create(pool);
apr_file_t *apr_file = NULL;
apr_finfo_t finfo = { 0 };
- apr_size_t cluster_count = 0;
+ int cluster_count = 0;
/* determine file size (if file still exists) */
apr_file_open(&apr_file, name,
- APR_READ | APR_BUFFERED, APR_OS_DEFAULT, sub_pool);
+ APR_READ | APR_BUFFERED, APR_OS_DEFAULT, subpool);
if (apr_file)
apr_file_info_get(&finfo, APR_FINFO_SIZE, apr_file);
- svn_pool_destroy(sub_pool);
+ svn_pool_destroy(subpool);
file = apr_pcalloc(pool, sizeof(*file));
file->name = apr_pstrdup(pool, name);
@@ -171,7 +187,7 @@ open_file(const char *name, int handle)
/* pre-allocate cluster map accordingly
* (will be auto-expanded later if necessary) */
- cluster_count = (apr_size_t)(1 + (file->size - 1) / cluster_size);
+ cluster_count = (int)(1 + (file->size - 1) / cluster_size);
file->read_map = apr_array_make(pool, file->size
? cluster_count
: 1, sizeof(word));
@@ -188,6 +204,14 @@ open_file(const char *name, int handle)
else
file->rev_num = -1;
+ /* filter out log/phys index files */
+ if (file->rev_num >= 0)
+ {
+ const char *suffix = name + strlen(name) - 4;
+ if (strcmp(suffix, ".l2p") == 0 || strcmp(suffix, ".p2l") == 0)
+ file->rev_num = -1;
+ }
+
apr_hash_set(files, file->name, APR_HASH_KEY_STRING, file);
}
@@ -220,9 +244,13 @@ read_file(int handle, apr_int64_t count)
{
/* known file handle -> expand current read sequence */
+ handle_info->read_count++;
handle_info->last_read_size += count;
handle_info->file->read_count++;
handle_info->file->read_size += count;
+
+ if (count == 0)
+ handle_info->file->empty_reads++;
}
}
@@ -242,6 +270,7 @@ seek_file(int handle, apr_int64_t location)
handle_info->last_read_size = 0;
handle_info->last_read_start = location;
+ handle_info->read_count = 0;
handle_info->file->seek_count++;
/* if we seek to a location that had not been read from before,
@@ -275,10 +304,17 @@ parse_line(svn_stringbuf_t *line)
char *return_value = strrchr(line->data, ' ');
char *first_param_end;
apr_int64_t func_return = 0;
+ char *func_start = strchr(line->data, ' ');
if (func_end == NULL || return_value == NULL)
return;
+ if (func_start == NULL || func_start > func_end)
+ func_start = line->data;
+ else
+    while (*func_start == ' ')
+ func_start++;
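+  /* Editor's note: this tolerates a leading column before the function
+   * name, e.g. a PID prefix as printed by "strace -f":
+   *   1234  open("rev", O_RDONLY) = 7
+   * Plain "open(...) = 7" lines still work because func_start then falls
+   * back to line->data. */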
+
first_param_end = strchr(func_end, ',');
if (first_param_end == NULL)
first_param_end = strchr(func_end, ')');
@@ -295,7 +331,7 @@ parse_line(svn_stringbuf_t *line)
svn_error_clear(svn_cstring_atoi64(&func_return, return_value));
/* process those operations that we care about */
- if (strcmp(line->data, "open") == 0)
+ if (strcmp(func_start, "open") == 0)
{
/* remove double quotes from file name parameter */
*func_end++ = 0;
@@ -303,11 +339,11 @@ parse_line(svn_stringbuf_t *line)
open_file(func_end, (int)func_return);
}
- else if (strcmp(line->data, "read") == 0)
+ else if (strcmp(func_start, "read") == 0)
read_file(atoi(func_end), func_return);
- else if (strcmp(line->data, "lseek") == 0)
+ else if (strcmp(func_start, "lseek") == 0)
seek_file(atoi(func_end), func_return);
- else if (strcmp(line->data, "close") == 0)
+ else if (strcmp(func_start, "close") == 0)
close_file(atoi(func_end));
}
@@ -317,7 +353,7 @@ static void
parse_file(apr_file_t *file)
{
apr_pool_t *pool = svn_pool_create(NULL);
- apr_pool_t *iter_pool = svn_pool_create(pool);
+ apr_pool_t *iterpool = svn_pool_create(pool);
/* limit lines to 4k (usually, we need less than 200 bytes) */
svn_stringbuf_t *line = svn_stringbuf_create_ensure(4096, pool);
@@ -327,13 +363,13 @@ parse_file(apr_file_t *file)
svn_error_t *err = NULL;
line->len = line->blocksize-1;
- err = svn_io_read_length_line(file, line->data, &line->len, iter_pool);
+ err = svn_io_read_length_line(file, line->data, &line->len, iterpool);
svn_error_clear(err);
if (err)
break;
parse_line(line);
- svn_pool_clear(iter_pool);
+ svn_pool_clear(iterpool);
}
while (line->len > 0);
}
@@ -494,17 +530,82 @@ write_bitmap_header(apr_file_t *file, int xsize, int ysize)
apr_file_write(file, header, &written);
}
-/* write the cluster read map for all files in INFO as BMP image to FILE.
+/* To COLOR, add the fractional value of SOURCE from fractional indexes
+ * SOURCE_START to SOURCE_END and apply the SCALING_FACTOR.
+ */
+static void
+add_sample(color_t color,
+ color_t *source,
+ double source_start,
+ double source_end,
+ double scaling_factor)
+{
+ double factor = (source_end - source_start) / scaling_factor;
+
+ apr_size_t i;
+ for (i = 0; i < sizeof(color_t) / sizeof(*color); ++i)
+ color[i] += (source_end - source_start < 0.5) && source_start > 1.0
+ ? factor * source[(apr_size_t)source_start - 1][i]
+ : factor * source[(apr_size_t)source_start][i];
+}
+
+/* Scale the IN_LEN RGB values from IN to OUT_LEN RGB values in OUT.
+ */
+static void
+scale_line(color_t* out,
+ int out_len,
+ color_t *in,
+ int in_len)
+{
+ double scaling_factor = (double)(in_len) / (double)(out_len);
+
+ apr_size_t i;
+ memset(out, 0, out_len * sizeof(color_t));
+ for (i = 0; i < out_len; ++i)
+ {
+ color_t color = { 0 };
+
+ double source_start = i * scaling_factor;
+ double source_end = (i + 1) * scaling_factor;
+
+ if ((apr_size_t)source_start == (apr_size_t)source_end)
+ {
+ add_sample(color, in, source_start, source_end, scaling_factor);
+ }
+ else
+ {
+ apr_size_t k;
+ apr_size_t first_sample_end = (apr_size_t)source_start + 1;
+ apr_size_t last_sample_start = (apr_size_t)source_end;
+
+ add_sample(color, in, source_start, first_sample_end, scaling_factor);
+ for (k = first_sample_end; k < last_sample_start; ++k)
+ add_sample(color, in, k, k + 1, scaling_factor);
+
+ add_sample(color, in, last_sample_start, source_end, scaling_factor);
+ }
+
+ memcpy(out[i], color, sizeof(color));
+ }
+}
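+
+/* Worked example (editor's illustration): scaling in_len = 3 pixels down
+ * to out_len = 2 gives scaling_factor = 1.5, and the box filter above
+ * yields
+ *   out[0] = (2*in[0] + in[1]) / 3
+ *   out[1] = (in[1] + 2*in[2]) / 3
+ * i.e. each output pixel is the area-weighted average of the source
+ * pixels it covers. */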
+
+/* Write the cluster read map for all files in INFO as BMP image to FILE.
+ * If MAX_X is not 0, scale all lines to MAX_X pixels. Use POOL for
+ * allocations.
*/
static void
-write_bitmap(apr_array_header_t *info, apr_file_t *file)
+write_bitmap(apr_array_header_t *info,
+ int max_x,
+ apr_file_t *file,
+ apr_pool_t *pool)
{
int ysize = info->nelts;
int xsize = 0;
int x, y;
- int row_size;
- int padding;
+ apr_size_t row_size;
apr_size_t written;
+ color_t *line, *scaled_line;
+ svn_boolean_t do_scale = max_x > 0;
/* xsize = max cluster number */
for (y = 0; y < ysize; ++y)
@@ -516,37 +617,40 @@ write_bitmap(apr_array_header_t *info, apr_file_t *file)
xsize = 0x3fff;
if (ysize >= 0x4000)
ysize = 0x3fff;
+ if (max_x == 0)
+ max_x = xsize;
/* rows in BMP files must be aligned to 4 bytes */
- row_size = APR_ALIGN(xsize * 3, 4);
- padding = row_size - xsize * 3;
+ row_size = APR_ALIGN(max_x * sizeof(color_t), 4);
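+  /* e.g. max_x = 5 gives APR_ALIGN(15, 4) = 16, i.e. one padding byte
+   * per row (editor's illustration) */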
+
+  /* allocate the source line buffer and the scaled output buffer */
+ line = apr_pcalloc(pool, xsize * sizeof(color_t));
+ scaled_line = apr_pcalloc(pool, row_size);
/* write header to file */
- write_bitmap_header(file, xsize, ysize);
+ write_bitmap_header(file, max_x, ysize);
/* write all rows */
for (y = 0; y < ysize; ++y)
{
file_stats_t *file_info = APR_ARRAY_IDX(info, y, file_stats_t *);
+ int block_count = file_info->read_map->nelts;
for (x = 0; x < xsize; ++x)
{
- byte color[3] = { 128, 128, 128 };
- if (x < file_info->read_map->nelts)
+ color_t color = { 128, 128, 128 };
+ if (x < block_count)
{
word count = APR_ARRAY_IDX(file_info->read_map, x, word);
select_color(color, count);
}
- written = sizeof(color);
- apr_file_write(file, color, &written);
+ memcpy(line[x], color, sizeof(color));
}
- if (padding)
- {
- char pad[3] = { 0 };
- written = padding;
- apr_file_write(file, pad, &written);
- }
+ scale_line(scaled_line, max_x, line, block_count ? block_count : 1);
+
+ written = row_size;
+ apr_file_write(file, do_scale ? scaled_line : line, &written);
}
}
@@ -592,6 +696,8 @@ print_stats(apr_pool_t *pool)
apr_int64_t clusters_read = 0;
apr_int64_t unique_clusters_read = 0;
apr_int64_t uncached_seek_count = 0;
+ apr_int64_t unnecessary_seek_count = 0;
+ apr_int64_t empty_read_count = 0;
apr_hash_index_t *hi;
for (hi = apr_hash_first(pool, files); hi; hi = apr_hash_next(hi))
@@ -609,13 +715,17 @@ print_stats(apr_pool_t *pool)
clusters_read += file->clusters_read;
unique_clusters_read += file->unique_clusters_read;
uncached_seek_count += file->uncached_seek_count;
+ unnecessary_seek_count += file->unnecessary_seeks;
+ empty_read_count += file->empty_reads;
}
printf("%20s files\n", svn__i64toa_sep(apr_hash_count(files), ',', pool));
printf("%20s files opened\n", svn__i64toa_sep(open_count, ',', pool));
printf("%20s seeks\n", svn__i64toa_sep(seek_count, ',', pool));
+ printf("%20s unnecessary seeks\n", svn__i64toa_sep(unnecessary_seek_count, ',', pool));
printf("%20s uncached seeks\n", svn__i64toa_sep(uncached_seek_count, ',', pool));
printf("%20s reads\n", svn__i64toa_sep(read_count, ',', pool));
+ printf("%20s empty reads\n", svn__i64toa_sep(empty_read_count, ',', pool));
printf("%20s unique clusters read\n", svn__i64toa_sep(unique_clusters_read, ',', pool));
printf("%20s clusters read\n", svn__i64toa_sep(clusters_read, ',', pool));
printf("%20s bytes read\n", svn__i64toa_sep(read_size, ',', pool));
@@ -629,7 +739,7 @@ print_usage(void)
printf("Reads strace of some FSFS-based tool from <file>, prints some stats\n");
printf("and writes a cluster access map to 'access.bmp' the current folder.\n");
printf("Each pixel corresponds to one 64kB cluster and every line to a rev\n");
- printf("or packed rev file in the repository. Turquoise and greed indicate\n");
+ printf("or packed rev file in the repository. Turquoise and green indicate\n");
printf("1 and 2 hits, yellow to read-ish colors for up to 20, shares of\n");
printf("for up to 100 and black for > 200 hits.\n\n");
printf("A typical strace invocation looks like this:\n");
@@ -665,7 +775,13 @@ int main(int argc, const char *argv[])
apr_file_open(&file, "access.bmp",
APR_WRITE | APR_CREATE | APR_TRUNCATE | APR_BUFFERED,
APR_OS_DEFAULT, pool);
- write_bitmap(get_rev_files(pool), file);
+ write_bitmap(get_rev_files(pool), 0, file, pool);
+ apr_file_close(file);
+
+ apr_file_open(&file, "access_scaled.bmp",
+ APR_WRITE | APR_CREATE | APR_TRUNCATE | APR_BUFFERED,
+ APR_OS_DEFAULT, pool);
+ write_bitmap(get_rev_files(pool), 1024, file, pool);
apr_file_close(file);
apr_file_open(&file, "scale.bmp",
@@ -675,4 +791,4 @@ int main(int argc, const char *argv[])
apr_file_close(file);
return 0;
-} \ No newline at end of file
+}
diff --git a/tools/dev/fsfs-reorg.c b/tools/dev/fsfs-reorg.c
deleted file mode 100644
index 052ad39..0000000
--- a/tools/dev/fsfs-reorg.c
+++ /dev/null
@@ -1,3147 +0,0 @@
-/* fsfs-reorg.c -- prototypic tool to reorganize packed FSFS repositories
- * to reduce seeks
- *
- * ====================================================================
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * ====================================================================
- */
-
-
-#include <assert.h>
-
-#include <apr.h>
-#include <apr_general.h>
-#include <apr_file_io.h>
-#include <apr_poll.h>
-
-#include "svn_pools.h"
-#include "svn_diff.h"
-#include "svn_io.h"
-#include "svn_utf.h"
-#include "svn_dirent_uri.h"
-#include "svn_sorts.h"
-#include "svn_delta.h"
-#include "svn_hash.h"
-
-#include "private/svn_string_private.h"
-#include "private/svn_subr_private.h"
-#include "private/svn_dep_compat.h"
-
-#ifndef _
-#define _(x) x
-#endif
-
-#define ERROR_TAG "fsfs-reporg: "
-
-/* forward declarations */
-typedef struct noderev_t noderev_t;
-typedef struct revision_info_t revision_info_t;
-
- * An FSFS rev file is a sequence of fragments and unused space (the latter
- * only being inserted by this tool and not during ordinary SVN operation).
- *
- * This type defines the type of any fragment.
- *
- * Please note that the classification as "property", "dir" or "file"
- * fragments is only to be used while determining the future placement
- * of a representation. If the rep is shared, the same rep may be used
- * as *any* of the 3 kinds.
- */
-enum fragment_kind_t
-{
-  /* the two-number line containing the changes and root node offsets */
- header_fragment,
-
- /* list of all changes in a revision */
- changes_fragment,
-
- /* (the textual representation of) a noderev */
- noderev_fragment,
-
- /* a property rep (including PLAIN / DELTA header) */
- property_fragment,
-
- /* a directory rep (including PLAIN / DELTA header) */
- dir_fragment,
-
- /* a file rep (including PLAIN / DELTA header) */
- file_fragment
-};
-
-/* A fragment. This is used to represent the final ordering, i.e. there
- * will be an array containing elements of this type that basically put
- * a fragment at some location in the target file.
- */
-typedef struct fragment_t
-{
- /* position in the target file */
- apr_size_t position;
-
- /* kind of fragment */
- enum fragment_kind_t kind;
-
- /* pointer to the fragment struct; type depends on KIND */
- void *data;
-} fragment_t;
-
-/* Location info for a single revision.
- */
-typedef struct revision_location_t
-{
- /* pack file offset (manifest value), 0 for non-packed files */
- apr_size_t offset;
-
- /* offset of the changes list relative to OFFSET */
- apr_size_t changes;
-
-  /* length of the changes list in bytes */
- apr_size_t changes_len;
-
- /* first offset behind the revision data in the pack file (file length
- * for non-packed revs) */
- apr_size_t end;
-} revision_location_t;
-
-/* Absolute position and size of some item.
- */
-typedef struct location_t
-{
- /* absolute offset in the file */
- apr_size_t offset;
-
- /* item length in bytes */
- apr_size_t size;
-} location_t;
-
-/* A parsed directory entry. Note that instances of this struct may be
- * shared between different DIRECTORY_T containers.
- */
-typedef struct direntry_t
-{
- /* (local) entry / path name */
- const char *name;
-
- /* strlen (name) */
- apr_size_t name_len;
-
- /* node rev providing ID and representation(s) */
- noderev_t *node;
-} direntry_t;
-
-/* Representation of a parsed directory content.
- */
-typedef struct directory_t
-{
- /* array of pointers to DIRENTRY_T */
- apr_array_header_t *entries;
-
- /* MD5 of the textual representation. Will be set lazily as a side-effect
- * of determining the length of this dir's textual representation. */
- unsigned char target_md5[16];
-
- /* (expanded) length of the textual representation.
- * Determined lazily during the write process. */
- apr_size_t size;
-} directory_t;
-
-/* A representation fragment.
- */
-typedef struct representation_t
-{
- /* location in the source file */
- location_t original;
-
- /* location in the reordered target file */
- location_t target;
-
- /* length of the PLAIN / DELTA line in the source file in bytes */
- apr_size_t header_size;
-
- /* deltification base, or NULL if there is none */
- struct representation_t *delta_base;
-
- /* revision that contains this representation
- * (may be referenced by other revisions, though) */
- revision_info_t *revision;
-
- /* representation content parsed as a directory. This will be NULL, if
- * *no* directory noderev uses this representation. */
- directory_t *dir;
-
- /* the source content has a PLAIN header, so we may simply copy the
- * source content into the target */
- svn_boolean_t is_plain;
-
- /* coloring flag used in the reordering algorithm to keep track of
- * representations that still need to be placed. */
- svn_boolean_t covered;
-} representation_t;
-
-/* A node rev.
- */
-struct noderev_t
-{
- /* location within the source file */
- location_t original;
-
- /* location within the reorganized target file. */
- location_t target;
-
- /* predecessor node, or NULL if there is none */
- noderev_t *predecessor;
-
- /* content representation; may be NULL if there is none */
- representation_t *text;
-
- /* properties representation; may be NULL if there is none */
- representation_t *props;
-
- /* revision that this noderev belongs to */
- revision_info_t *revision;
-
- /* coloring flag used in the reordering algorithm to keep track of
- * representations that still need to be placed. */
- svn_boolean_t covered;
-};
-
-/* Represents a single revision.
- * There will be only one instance per revision. */
-struct revision_info_t
-{
- /* number of this revision */
- svn_revnum_t revision;
-
- /* position in the source file */
- revision_location_t original;
-
- /* position in the reorganized target file */
- revision_location_t target;
-
- /* noderev of the root directory */
- noderev_t *root_noderev;
-
- /* all noderevs_t of this revision (ordered by source file offset),
- * i.e. those that point back to this struct */
- apr_array_header_t *node_revs;
-
- /* all representation_t of this revision (ordered by source file offset),
- * i.e. those that point back to this struct */
- apr_array_header_t *representations;
-};
-
-/* Represents a packed revision file.
- */
-typedef struct revision_pack_t
-{
- /* first revision in the pack file */
- svn_revnum_t base;
-
- /* revision_info_t* of all revisions in the pack file; in revision order. */
- apr_array_header_t *info;
-
- /* list of fragments to place in the target pack file; in target order. */
- apr_array_header_t *fragments;
-
- /* source pack file length */
- apr_size_t filesize;
-
- /* temporary value. Equal to the number of bytes in the target pack file
- * already allocated to fragments. */
- apr_size_t target_offset;
-} revision_pack_t;
-
-/* Cache for revision source content. All content is stored in DATA and
- * the HASH maps revision number to an svn_string_t instance whose data
- * member points into DATA.
- *
- * Once TOTAL_SIZE exceeds LIMIT, all content will be discarded. Similarly,
- * the hash gets cleared every 10000 insertions to keep the HASH_POOL
- * memory usage in check.
- */
-typedef struct content_cache_t
-{
- /* pool used for HASH */
- apr_pool_t *hash_pool;
-
- /* svn_revnum_t -> svn_string_t.
- * The strings become (potentially) invalid when adding new cache entries. */
- apr_hash_t *hash;
-
- /* data buffer. the first TOTAL_SIZE bytes are actually being used. */
- char *data;
-
- /* DATA capacity */
- apr_size_t limit;
-
- /* number of bytes used in DATA */
- apr_size_t total_size;
-
- /* number of insertions since the last hash cleanup */
- apr_size_t insert_count;
-} content_cache_t;
-
-/* A cached directory. In contrast to directory_t, this stores the data as
- * the plain hash that the normal FSFS will use to serialize & diff dirs.
- */
-typedef struct dir_cache_entry_t
-{
- /* revision containing the representation */
- svn_revnum_t revision;
-
- /* offset of the representation within that revision */
- apr_size_t offset;
-
- /* key-value representation of the directory entries */
- apr_hash_t *hash;
-} dir_cache_entry_t;
-
-/* Directory cache. (revision, offset) will be mapped directly into the
- * ENTRIES array of ENTRY_COUNT buckets (many entries will be NULL).
- * Two alternating pools will be used to allocate dir content.
- *
- * If the INSERT_COUNT exceeds a given limit, the pools get exchanged and
- * the older of the two will be cleared. This is to keep dir objects valid
- * for at least one insertion.
- */
-typedef struct dir_cache_t
-{
- /* fixed-size array of ENTRY_COUNT elements */
- dir_cache_entry_t *entries;
-
- /* currently used for entry allocations */
- apr_pool_t *pool1;
-
- /* previously used for entry allocations */
- apr_pool_t *pool2;
-
- /* size of ENTRIES in elements */
- apr_size_t entry_count;
-
- /* number of directory elements added. I.e. usually >> #cached dirs */
- apr_size_t insert_count;
-} dir_cache_t;
-
-/* A cached, undeltified txdelta window.
- */
-typedef struct window_cache_entry_t
-{
- /* revision containing the window */
- svn_revnum_t revision;
-
- /* offset of the deltified window within that revision */
- apr_size_t offset;
-
- /* window content */
- svn_stringbuf_t *window;
-} window_cache_entry_t;
-
-/* Cache for undeltified txdelta windows. (revision, offset) will be mapped
- * directly into the ENTRIES array of ENTRY_COUNT buckets (most entries
- * will be NULL).
- *
- * The cache will be cleared when USED exceeds CAPACITY.
- */
-typedef struct window_cache_t
-{
- /* fixed-size array of ENTRY_COUNT elements */
- window_cache_entry_t *entries;
-
- /* used to allocate windows */
- apr_pool_t *pool;
-
- /* size of ENTRIES in elements */
- apr_size_t entry_count;
-
- /* maximum combined size of all cached windows */
- apr_size_t capacity;
-
- /* current combined size of all cached windows */
- apr_size_t used;
-} window_cache_t;
-
-/* Root data structure containing all information about a given repository.
- */
-typedef struct fs_fs_t
-{
- /* repository to reorg */
- const char *path;
-
- /* revision to start at (must be 0, ATM) */
- svn_revnum_t start_revision;
-
- /* FSFS format number */
- int format;
-
- /* highest revision number in the repo */
- svn_revnum_t max_revision;
-
- /* first non-packed revision */
- svn_revnum_t min_unpacked_rev;
-
-  /* sharding size, i.e. max files per shard directory */
- int max_files_per_dir;
-
- /* all revisions */
- apr_array_header_t *revisions;
-
- /* all packed files */
- apr_array_header_t *packs;
-
- /* empty representation.
- * Used as a dummy base for DELTA reps without base. */
- representation_t *null_base;
-
- /* revision content cache */
- content_cache_t *cache;
-
- /* directory hash cache */
- dir_cache_t *dir_cache;
-
- /* undeltified txdelta window cache */
- window_cache_t *window_cache;
-} fs_fs_t;
-
-/* Return the rev pack folder for revision REV in FS.
- */
-static const char *
-get_pack_folder(fs_fs_t *fs,
- svn_revnum_t rev,
- apr_pool_t *pool)
-{
- return apr_psprintf(pool, "%s/db/revs/%ld.pack",
- fs->path, rev / fs->max_files_per_dir);
-}
-
-/* Return the path of the file containing revision REV in FS.
- */
-static const char *
-rev_or_pack_file_name(fs_fs_t *fs,
- svn_revnum_t rev,
- apr_pool_t *pool)
-{
- return fs->min_unpacked_rev > rev
- ? svn_dirent_join(get_pack_folder(fs, rev, pool), "pack", pool)
- : apr_psprintf(pool, "%s/db/revs/%ld/%ld", fs->path,
- rev / fs->max_files_per_dir, rev);
-}
-
-/* Open the file containing revision REV in FS and return it in *FILE.
- */
-static svn_error_t *
-open_rev_or_pack_file(apr_file_t **file,
- fs_fs_t *fs,
- svn_revnum_t rev,
- apr_pool_t *pool)
-{
- return svn_io_file_open(file,
- rev_or_pack_file_name(fs, rev, pool),
- APR_READ | APR_BUFFERED,
- APR_OS_DEFAULT,
- pool);
-}
-
-/* Read the whole content of the file containing REV in FS and return that
- * in *CONTENT.
- */
-static svn_error_t *
-read_rev_or_pack_file(svn_stringbuf_t **content,
- fs_fs_t *fs,
- svn_revnum_t rev,
- apr_pool_t *pool)
-{
- return svn_stringbuf_from_file2(content,
- rev_or_pack_file_name(fs, rev, pool),
- pool);
-}
-
-/* Return a new content cache with the given size LIMIT. Use POOL for
- * all cache-related allocations.
- */
-static content_cache_t *
-create_content_cache(apr_pool_t *pool,
- apr_size_t limit)
-{
- content_cache_t *result = apr_pcalloc(pool, sizeof(*result));
-
- result->hash_pool = svn_pool_create(pool);
- result->hash = svn_hash__make(result->hash_pool);
- result->limit = limit;
- result->total_size = 0;
- result->insert_count = 0;
- result->data = apr_palloc(pool, limit);
-
- return result;
-}
-
-/* Return the content of revision REVISION from CACHE. Return NULL upon a
- * cache miss. This is a cache-internal function.
- */
-static svn_string_t *
-get_cached_content(content_cache_t *cache,
- svn_revnum_t revision)
-{
- return apr_hash_get(cache->hash, &revision, sizeof(revision));
-}
-
-/* Take the content in DATA and store it under REVISION in CACHE.
- * This is a cache-internal function.
- */
-static void
-set_cached_content(content_cache_t *cache,
- svn_revnum_t revision,
- svn_string_t *data)
-{
- svn_string_t *content;
- svn_revnum_t *key;
-
- /* double insertion? -> broken cache logic */
- assert(get_cached_content(cache, revision) == NULL);
-
- /* purge the cache upon overflow */
- if (cache->total_size + data->len > cache->limit)
- {
- /* the hash pool grows slowly over time; clear it once in a while */
- if (cache->insert_count > 10000)
- {
- svn_pool_clear(cache->hash_pool);
- cache->hash = svn_hash__make(cache->hash_pool);
- cache->insert_count = 0;
- }
- else
- cache->hash = svn_hash__make(cache->hash_pool);
-
- cache->total_size = 0;
-
- /* buffer overflow / revision too large */
- if (data->len > cache->limit)
- SVN_ERR_MALFUNCTION_NO_RETURN();
- }
-
-  /* copy data to cache and update the index (hash) */
- content = apr_palloc(cache->hash_pool, sizeof(*content));
- content->data = cache->data + cache->total_size;
- content->len = data->len;
-
- memcpy(cache->data + cache->total_size, data->data, data->len);
- cache->total_size += data->len;
-
- key = apr_palloc(cache->hash_pool, sizeof(*key));
- *key = revision;
-
- apr_hash_set(cache->hash, key, sizeof(*key), content);
- ++cache->insert_count;
-}
-
-/* Get the file content of revision REVISION in FS and return it in *DATA.
- * Use SCRATCH_POOL for temporary allocations.
- */
-static svn_error_t *
-get_content(svn_string_t **data,
- fs_fs_t *fs,
- svn_revnum_t revision,
- apr_pool_t *scratch_pool)
-{
- apr_file_t *file;
- revision_info_t *revision_info;
- svn_stringbuf_t *temp;
- apr_off_t temp_offset;
-
- /* try getting the data from our cache */
- svn_string_t *result = get_cached_content(fs->cache, revision);
- if (result)
- {
- *data = result;
- return SVN_NO_ERROR;
- }
-
- /* not in cache. Is the revision valid at all? */
- if (revision - fs->start_revision > fs->revisions->nelts)
- return svn_error_createf(SVN_ERR_BAD_VERSION_FILE_FORMAT, NULL,
- _("Unknown revision %ld"), revision);
- revision_info = APR_ARRAY_IDX(fs->revisions,
- revision - fs->start_revision,
- revision_info_t*);
-
- /* read the revision content. Assume that the file has *not* been
- * reorg'ed, yet, i.e. all data is in one place. */
- temp = svn_stringbuf_create_ensure( revision_info->original.end
- - revision_info->original.offset,
- scratch_pool);
- temp->len = revision_info->original.end - revision_info->original.offset;
- SVN_ERR(open_rev_or_pack_file(&file, fs, revision, scratch_pool));
-
- temp_offset = revision_info->original.offset;
- SVN_ERR(svn_io_file_seek(file, APR_SET, &temp_offset,
- scratch_pool));
- SVN_ERR_ASSERT(temp_offset < APR_SIZE_MAX);
- revision_info->original.offset = (apr_size_t)temp_offset;
- SVN_ERR(svn_io_file_read(file, temp->data, &temp->len, scratch_pool));
-
- /* cache the result and return it */
- set_cached_content(fs->cache, revision,
- svn_stringbuf__morph_into_string(temp));
- *data = get_cached_content(fs->cache, revision);
-
- return SVN_NO_ERROR;
-}
-
-/* Return a new directory cache with ENTRY_COUNT buckets in its index.
- * Use POOL for all cache-related allocations.
- */
-static dir_cache_t *
-create_dir_cache(apr_pool_t *pool,
- apr_size_t entry_count)
-{
- dir_cache_t *result = apr_pcalloc(pool, sizeof(*result));
-
- result->pool1 = svn_pool_create(pool);
- result->pool2 = svn_pool_create(pool);
- result->entry_count = entry_count;
- result->insert_count = 0;
- result->entries = apr_pcalloc(pool, sizeof(*result->entries) * entry_count);
-
- return result;
-}
-
-/* Return the position within FS' dir cache ENTRIES index for the given
- * (REVISION, OFFSET) pair. This is a cache-internal function.
- */
-static apr_size_t
-get_dir_cache_index(fs_fs_t *fs,
- svn_revnum_t revision,
- apr_size_t offset)
-{
- return (revision + offset * 0xd1f3da69) % fs->dir_cache->entry_count;
-}
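-
-/* Editor's note: get_dir_cache_index() implements a direct-mapped cache;
- * the large odd multiplier merely scatters (revision, offset) pairs across
- * the ENTRY_COUNT buckets, and a collision simply overwrites the previous
- * entry on insertion.
- */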
-
-/* Return the currently active pool of FS' dir cache. Note that it may be
- * cleared after *2* insertions.
- */
-static apr_pool_t *
-get_cached_dir_pool(fs_fs_t *fs)
-{
- return fs->dir_cache->pool1;
-}
-
-/* Return the cached directory content stored in REPRESENTATION within FS.
- * If that has not been found in cache, return NULL.
- */
-static apr_hash_t *
-get_cached_dir(fs_fs_t *fs,
- representation_t *representation)
-{
- svn_revnum_t revision = representation->revision->revision;
- apr_size_t offset = representation->original.offset;
-
- apr_size_t i = get_dir_cache_index(fs, revision, offset);
- dir_cache_entry_t *entry = &fs->dir_cache->entries[i];
-
- return entry->offset == offset && entry->revision == revision
- ? entry->hash
- : NULL;
-}
-
-/* Cache the directory HASH for REPRESENTATION within FS.
- */
-static void
-set_cached_dir(fs_fs_t *fs,
- representation_t *representation,
- apr_hash_t *hash)
-{
- /* select the entry to use */
- svn_revnum_t revision = representation->revision->revision;
- apr_size_t offset = representation->original.offset;
-
- apr_size_t i = get_dir_cache_index(fs, revision, offset);
- dir_cache_entry_t *entry = &fs->dir_cache->entries[i];
-
- /* clean the cache and rotate pools at regular intervals */
- fs->dir_cache->insert_count += apr_hash_count(hash);
- if (fs->dir_cache->insert_count >= fs->dir_cache->entry_count * 100)
- {
- apr_pool_t *pool;
-
- svn_pool_clear(fs->dir_cache->pool2);
- memset(fs->dir_cache->entries,
- 0,
- sizeof(*fs->dir_cache->entries) * fs->dir_cache->entry_count);
- fs->dir_cache->insert_count = 0;
-
- pool = fs->dir_cache->pool2;
- fs->dir_cache->pool2 = fs->dir_cache->pool1;
- fs->dir_cache->pool1 = pool;
- }
-
- /* write data to cache */
- entry->hash = hash;
- entry->offset = offset;
- entry->revision = revision;
-}
-
-/* Return a new txdelta window cache with ENTRY_COUNT buckets in its index
- * and the total CAPACITY given in bytes.
- * Use POOL for all cache-related allocations.
- */
-static window_cache_t *
-create_window_cache(apr_pool_t *pool,
- apr_size_t entry_count,
- apr_size_t capacity)
-{
- window_cache_t *result = apr_pcalloc(pool, sizeof(*result));
-
- result->pool = svn_pool_create(pool);
- result->entry_count = entry_count;
- result->capacity = capacity;
- result->used = 0;
- result->entries = apr_pcalloc(pool, sizeof(*result->entries) * entry_count);
-
- return result;
-}
-
-/* Return the position within FS' window cache ENTRIES index for the given
- * (REVISION, OFFSET) pair. This is a cache-internal function.
- */
-static apr_size_t
-get_window_cache_index(fs_fs_t *fs,
- svn_revnum_t revision,
- apr_size_t offset)
-{
- return (revision + offset * 0xd1f3da69) % fs->window_cache->entry_count;
-}
-
-/* Return the cached txdelta window stored in REPRESENTATION within FS.
- * If that has not been found in cache, return NULL.
- */
-static svn_stringbuf_t *
-get_cached_window(fs_fs_t *fs,
- representation_t *representation,
- apr_pool_t *pool)
-{
- svn_revnum_t revision = representation->revision->revision;
- apr_size_t offset = representation->original.offset;
-
- apr_size_t i = get_window_cache_index(fs, revision, offset);
- window_cache_entry_t *entry = &fs->window_cache->entries[i];
-
- return entry->offset == offset && entry->revision == revision
- ? svn_stringbuf_dup(entry->window, pool)
- : NULL;
-}
-
-/* Cache the undeltified txdelta WINDOW for REPRESENTATION within FS.
- */
-static void
-set_cached_window(fs_fs_t *fs,
- representation_t *representation,
- svn_stringbuf_t *window)
-{
- /* select entry */
- svn_revnum_t revision = representation->revision->revision;
- apr_size_t offset = representation->original.offset;
-
- apr_size_t i = get_window_cache_index(fs, revision, offset);
- window_cache_entry_t *entry = &fs->window_cache->entries[i];
-
- /* if the capacity is exceeded, clear the cache */
- fs->window_cache->used += window->len;
- if (fs->window_cache->used >= fs->window_cache->capacity)
- {
- svn_pool_clear(fs->window_cache->pool);
- memset(fs->window_cache->entries,
- 0,
- sizeof(*fs->window_cache->entries) * fs->window_cache->entry_count);
- fs->window_cache->used = window->len;
- }
-
- /* set the entry to a copy of the window data */
- entry->window = svn_stringbuf_dup(window, fs->window_cache->pool);
- entry->offset = offset;
- entry->revision = revision;
-}
-
-/* Given rev pack PATH in FS, read the manifest file and return the offsets
- * in *MANIFEST. Use POOL for allocations.
- */
-static svn_error_t *
-read_manifest(apr_array_header_t **manifest,
- fs_fs_t *fs,
- const char *path,
- apr_pool_t *pool)
-{
- svn_stream_t *manifest_stream;
- apr_pool_t *iterpool;
-
- /* Open the manifest file. */
- SVN_ERR(svn_stream_open_readonly(&manifest_stream,
- svn_dirent_join(path, "manifest", pool),
- pool, pool));
-
- /* While we're here, let's just read the entire manifest file into an array,
- so we can cache the entire thing. */
- iterpool = svn_pool_create(pool);
- *manifest = apr_array_make(pool, fs->max_files_per_dir, sizeof(apr_size_t));
- while (1)
- {
- svn_stringbuf_t *sb;
- svn_boolean_t eof;
- apr_uint64_t val;
- svn_error_t *err;
-
- svn_pool_clear(iterpool);
- SVN_ERR(svn_stream_readline(manifest_stream, &sb, "\n", &eof, iterpool));
- if (eof)
- break;
-
- err = svn_cstring_strtoui64(&val, sb->data, 0, APR_SIZE_MAX, 10);
- if (err)
- return svn_error_createf(SVN_ERR_FS_CORRUPT, err,
- _("Manifest offset '%s' too large"),
- sb->data);
- APR_ARRAY_PUSH(*manifest, apr_size_t) = (apr_size_t)val;
- }
- svn_pool_destroy(iterpool);
-
- return svn_stream_close(manifest_stream);
-}
-
-/* Read header information for the revision stored in FILE_CONTENT at
- * offsets START to END. Return the offsets within FILE_CONTENT for the
- * *ROOT_NODEREV, the list of *CHANGES and its len in *CHANGES_LEN.
- * Use POOL for temporary allocations. */
-static svn_error_t *
-read_revision_header(apr_size_t *changes,
- apr_size_t *changes_len,
- apr_size_t *root_noderev,
- svn_stringbuf_t *file_content,
- apr_size_t start,
- apr_size_t end,
- apr_pool_t *pool)
-{
- char buf[64];
- const char *line;
- char *space;
- apr_uint64_t val;
- apr_size_t len;
-
- /* Read in this last block, from which we will identify the last line. */
- len = sizeof(buf);
- if (start + len > end)
- len = end - start;
-
- memcpy(buf, file_content->data + end - len, len);
-
- /* The last byte should be a newline. */
- if (buf[(apr_ssize_t)len - 1] != '\n')
- return svn_error_create(SVN_ERR_FS_CORRUPT, NULL,
- _("Revision lacks trailing newline"));
-
-  /* Look backwards for the preceding newline. */
- buf[len - 1] = 0;
- line = strrchr(buf, '\n');
- if (line == NULL)
- return svn_error_create(SVN_ERR_FS_CORRUPT, NULL,
- _("Final line in revision file longer "
- "than 64 characters"));
-
- space = strchr(line, ' ');
- if (space == NULL)
- return svn_error_create(SVN_ERR_FS_CORRUPT, NULL,
- _("Final line in revision file missing space"));
-
- /* terminate the header line */
- *space = 0;
-
- /* extract information */
- SVN_ERR(svn_cstring_strtoui64(&val, line+1, 0, APR_SIZE_MAX, 10));
- *root_noderev = (apr_size_t)val;
- SVN_ERR(svn_cstring_strtoui64(&val, space+1, 0, APR_SIZE_MAX, 10));
- *changes = (apr_size_t)val;
- *changes_len = end - *changes - start - (buf + len - line) + 1;
-
- return SVN_NO_ERROR;
-}
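-
-/* Editor's illustration: the trailer parsed above is the rev file's final
- * line of the form "<root-noderev-offset> <changes-offset>"; e.g. a file
- * ending in "\n340 7752\n" yields *root_noderev = 340 and *changes = 7752.
- */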
-
-/* Read the FSFS format number and sharding size from the format file at
- * PATH and return it in *PFORMAT and *MAX_FILES_PER_DIR respectively.
- * Use POOL for temporary allocations.
- */
-static svn_error_t *
-read_format(int *pformat, int *max_files_per_dir,
- const char *path, apr_pool_t *pool)
-{
- svn_error_t *err;
- apr_file_t *file;
- char buf[80];
- apr_size_t len;
-
- /* open format file and read the first line */
- err = svn_io_file_open(&file, path, APR_READ | APR_BUFFERED,
- APR_OS_DEFAULT, pool);
- if (err && APR_STATUS_IS_ENOENT(err->apr_err))
- {
- /* Treat an absent format file as format 1. Do not try to
- create the format file on the fly, because the repository
- might be read-only for us, or this might be a read-only
- operation, and the spirit of FSFS is to make no changes
-         whatsoever in read-only operations. See thread starting at
- http://subversion.tigris.org/servlets/ReadMsg?list=dev&msgNo=97600
- for more. */
- svn_error_clear(err);
- *pformat = 1;
- *max_files_per_dir = 0;
-
- return SVN_NO_ERROR;
- }
- SVN_ERR(err);
-
- len = sizeof(buf);
- err = svn_io_read_length_line(file, buf, &len, pool);
- if (err && APR_STATUS_IS_EOF(err->apr_err))
- {
- /* Return a more useful error message. */
- svn_error_clear(err);
- return svn_error_createf(SVN_ERR_BAD_VERSION_FILE_FORMAT, NULL,
- _("Can't read first line of format file '%s'"),
- svn_dirent_local_style(path, pool));
- }
- SVN_ERR(err);
-
- /* Check that the first line contains only digits. */
- SVN_ERR(svn_cstring_atoi(pformat, buf));
-
- /* Set the default values for anything that can be set via an option. */
- *max_files_per_dir = 0;
-
- /* Read any options. */
- while (1)
- {
- len = sizeof(buf);
- err = svn_io_read_length_line(file, buf, &len, pool);
- if (err && APR_STATUS_IS_EOF(err->apr_err))
- {
- /* No more options; that's okay. */
- svn_error_clear(err);
- break;
- }
- SVN_ERR(err);
-
- if (strncmp(buf, "layout ", 7) == 0)
- {
- if (strcmp(buf+7, "linear") == 0)
- {
- *max_files_per_dir = 0;
- continue;
- }
-
- if (strncmp(buf+7, "sharded ", 8) == 0)
- {
- /* Check that the argument is numeric. */
- SVN_ERR(svn_cstring_atoi(max_files_per_dir, buf + 15));
- continue;
- }
- }
-
- return svn_error_createf(SVN_ERR_BAD_VERSION_FILE_FORMAT, NULL,
- _("'%s' contains invalid filesystem format option '%s'"),
- svn_dirent_local_style(path, pool), buf);
- }
-
- return svn_io_file_close(file, pool);
-}
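-
-/* Editor's illustration: a format file accepted by the parser above could
- * read
- *
- *   4
- *   layout sharded 1000
- *
- * yielding *pformat = 4 and *max_files_per_dir = 1000.
- */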
-
-/* Read the content of the file at PATH and return it in *RESULT.
- * Use POOL for temporary allocations.
- */
-static svn_error_t *
-read_number(svn_revnum_t *result, const char *path, apr_pool_t *pool)
-{
- svn_stringbuf_t *content;
- apr_uint64_t number;
-
- SVN_ERR(svn_stringbuf_from_file2(&content, path, pool));
-
- content->data[content->len-1] = 0;
- SVN_ERR(svn_cstring_strtoui64(&number, content->data, 0, LONG_MAX, 10));
- *result = (svn_revnum_t)number;
-
- return SVN_NO_ERROR;
-}
-
-/* Create *FS for the repository at PATH and read the format and size info.
- * Use POOL for temporary allocations.
- */
-static svn_error_t *
-fs_open(fs_fs_t **fs, const char *path, apr_pool_t *pool)
-{
- *fs = apr_pcalloc(pool, sizeof(**fs));
- (*fs)->path = apr_pstrdup(pool, path);
- (*fs)->max_files_per_dir = 1000;
-
- /* Read the FS format number. */
- SVN_ERR(read_format(&(*fs)->format,
- &(*fs)->max_files_per_dir,
- svn_dirent_join(path, "db/format", pool),
- pool));
- if (((*fs)->format != 4) && ((*fs)->format != 6))
- return svn_error_create(SVN_ERR_FS_UNSUPPORTED_FORMAT, NULL, NULL);
-
- /* read size (HEAD) info */
- SVN_ERR(read_number(&(*fs)->min_unpacked_rev,
- svn_dirent_join(path, "db/min-unpacked-rev", pool),
- pool));
- return read_number(&(*fs)->max_revision,
- svn_dirent_join(path, "db/current", pool),
- pool);
-}
-
-/* Utility function that returns true if STRING->DATA matches KEY.
- */
-static svn_boolean_t
-key_matches(svn_string_t *string, const char *key)
-{
- return strcmp(string->data, key) == 0;
-}
-
-/* Comparator used for binary search comparing the absolute file offset
- * of a noderev to some other offset. DATA is a *noderev_t, KEY is a pointer
- * to an apr_size_t.
- */
-static int
-compare_noderev_offsets(const void *data, const void *key)
-{
- apr_ssize_t diff = (*(const noderev_t *const *)data)->original.offset
- - *(const apr_size_t *)key;
-
- /* sizeof(int) may be < sizeof(ssize_t) */
- if (diff < 0)
- return -1;
- return diff > 0 ? 1 : 0;
-}
-
-/* Get the revision and offset info from the node ID with FS. Return the
- * data as *REVISION_INFO and *OFFSET, respectively.
- *
- * Note that we assume that the revision_info_t object ID's revision has
- * already been created. That can be guaranteed for standard FSFS pack
- * files as IDs never point to future revisions.
- */
-static svn_error_t *
-parse_revnode_pos(revision_info_t **revision_info,
- apr_size_t *offset,
- fs_fs_t *fs,
- svn_string_t *id)
-{
- int revision;
- apr_uint64_t temp;
-
- /* split the ID and verify the format */
- const char *revision_pos = strrchr(id->data, 'r');
- char *offset_pos = (char *)strchr(id->data, '/');
-
- if (revision_pos == NULL || offset_pos == NULL)
- return svn_error_createf(SVN_ERR_BAD_VERSION_FILE_FORMAT, NULL,
- _("Invalid node id '%s'"), id->data);
-
- /* extract the numbers (temp. modifying the ID)*/
- *offset_pos = 0;
- SVN_ERR(svn_cstring_atoi(&revision, revision_pos + 1));
- SVN_ERR(svn_cstring_strtoui64(&temp, offset_pos + 1, 0, APR_SIZE_MAX, 10));
- *offset = (apr_size_t)temp;
- *offset_pos = '/';
-
- /* validate the revision number and return the revision info */
- if (revision - fs->start_revision > fs->revisions->nelts)
- return svn_error_createf(SVN_ERR_BAD_VERSION_FILE_FORMAT, NULL,
- _("Unknown revision %d"), revision);
-
- *revision_info = APR_ARRAY_IDX(fs->revisions,
- revision - fs->start_revision,
- revision_info_t*);
-
- return SVN_NO_ERROR;
-}
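-
-/* Editor's illustration: for a node ID such as "0.0.r12/4096", the code
- * above extracts revision 12 (the digits after the last 'r') and offset
- * 4096 (the digits after the '/').
- */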
-
-/* Returns in *RESULT the noderev at OFFSET relative the revision given in
- * REVISION_INFO. If no such noderev has been parsed, yet, error out.
- *
- * Since we require the noderev to already have been parsed, we can use
- * this function only to access "older", i.e. predecessor noderevs.
- */
-static svn_error_t *
-find_noderev(noderev_t **result,
- revision_info_t *revision_info,
- apr_size_t offset)
-{
- int idx = svn_sort__bsearch_lower_bound(&offset,
- revision_info->node_revs,
- compare_noderev_offsets);
- if ((idx < 0) || (idx >= revision_info->node_revs->nelts))
- return svn_error_createf(SVN_ERR_BAD_VERSION_FILE_FORMAT, NULL,
- _("No noderev found at offset %ld"),
- (long)offset);
-
- *result = APR_ARRAY_IDX(revision_info->node_revs, idx, noderev_t *);
- if ((*result)->original.offset != offset)
- return svn_error_createf(SVN_ERR_BAD_VERSION_FILE_FORMAT, NULL,
- _("No noderev found at offset %ld"),
- (long)offset);
-
- return SVN_NO_ERROR;
-}
-
-/* In *RESULT, return the noderev given by ID in FS. The noderev must
- * already have been parsed and put into the FS data structures.
- */
-static svn_error_t *
-parse_pred(noderev_t **result,
- fs_fs_t *fs,
- svn_string_t *id)
-{
- apr_size_t offset;
- revision_info_t *revision_info;
-
- SVN_ERR(parse_revnode_pos(&revision_info, &offset, fs, id));
- SVN_ERR(find_noderev(result, revision_info, offset));
-
- return SVN_NO_ERROR;
-}
-
-/* Comparator used for binary search comparing the absolute file offset
- * of a representation to some other offset. DATA is a *representation_t,
- * KEY is a pointer to an apr_size_t.
- */
-static int
-compare_representation_offsets(const void *data, const void *key)
-{
- apr_ssize_t diff = (*(const representation_t *const *)data)->original.offset
- - *(const apr_size_t *)key;
-
- /* sizeof(int) may be < sizeof(ssize_t) */
- if (diff < 0)
- return -1;
- return diff > 0 ? 1 : 0;
-}
-
-/* Find the revision_info_t object for the given REVISION in FS and return
- * it in *REVISION_INFO. For performance reasons, we skip the lookup if
- * the info is already provided.
- *
- * In that revision, look for the representation_t object for offset OFFSET.
- * If it already exists, set *idx to its index in *REVISION_INFO's
- * representations list and return the representation object. Otherwise,
- * set the index to where it must be inserted and return NULL.
- */
-static representation_t *
-find_representation(int *idx,
- fs_fs_t *fs,
- revision_info_t **revision_info,
- int revision,
- apr_size_t offset)
-{
- revision_info_t *info;
- *idx = -1;
-
-  /* first, let's find the revision */
- info = revision_info ? *revision_info : NULL;
- if (info == NULL || info->revision != revision)
- {
- info = APR_ARRAY_IDX(fs->revisions,
- revision - fs->start_revision,
- revision_info_t*);
- if (revision_info)
- *revision_info = info;
- }
-
- /* not found -> no result */
- if (info == NULL)
- return NULL;
-
- assert(revision == info->revision);
-
- /* look for the representation */
- *idx = svn_sort__bsearch_lower_bound(&offset,
- info->representations,
- compare_representation_offsets);
- if (*idx < info->representations->nelts)
- {
- /* return the representation, if this is the one we were looking for */
- representation_t *result
- = APR_ARRAY_IDX(info->representations, *idx, representation_t *);
- if (result->original.offset == offset)
- return result;
- }
-
- /* not parsed, yet */
- return NULL;
-}
-
-/* Read the representation header in FILE_CONTENT at OFFSET. Return its
- * size in *HEADER_SIZE, set *IS_PLAIN if no deltification was used and
- * return the deltification base representation in *REPRESENTATION. If
- * there is none, set it to NULL. Use FS to look it up.
- *
- * Use SCRATCH_POOL for temporary allocations.
- */
-static svn_error_t *
-read_rep_base(representation_t **representation,
- apr_size_t *header_size,
- svn_boolean_t *is_plain,
- fs_fs_t *fs,
- svn_stringbuf_t *file_content,
- apr_size_t offset,
- apr_pool_t *scratch_pool)
-{
- char *str, *last_str;
- int idx, revision;
- apr_uint64_t temp;
-
- /* identify representation header (1 line) */
- const char *buffer = file_content->data + offset;
- const char *line_end = strchr(buffer, '\n');
- *header_size = line_end - buffer + 1;
-
- /* check for PLAIN rep */
- if (strncmp(buffer, "PLAIN\n", *header_size) == 0)
- {
- *is_plain = TRUE;
- *representation = NULL;
- return SVN_NO_ERROR;
- }
-
- /* check for DELTA against empty rep */
- *is_plain = FALSE;
- if (strncmp(buffer, "DELTA\n", *header_size) == 0)
- {
- /* This is a delta against the empty stream. */
- *representation = fs->null_base;
- return SVN_NO_ERROR;
- }
-
-  /* it's a delta against some other rep. Duplicate the header info such
- * that we may modify it during parsing. */
- str = apr_pstrndup(scratch_pool, buffer, line_end - buffer);
- last_str = str;
-
- /* parse it. */
- str = svn_cstring_tokenize(" ", &last_str);
- str = svn_cstring_tokenize(" ", &last_str);
- SVN_ERR(svn_cstring_atoi(&revision, str));
-
- str = svn_cstring_tokenize(" ", &last_str);
- SVN_ERR(svn_cstring_strtoui64(&temp, str, 0, APR_SIZE_MAX, 10));
-
- /* it should refer to a rep in an earlier revision. Look it up */
- *representation = find_representation(&idx, fs, NULL, revision, (apr_size_t)temp);
- return SVN_NO_ERROR;
-}
-
-/* Parse the representation reference (text: or props:) in VALUE, look
- * it up in FS and return it in *REPRESENTATION. To be able to parse the
- * base rep, we pass the FILE_CONTENT as well.
- *
- * If necessary, allocate the result in POOL; use SCRATCH_POOL for temp.
- * allocations.
- */
-static svn_error_t *
-parse_representation(representation_t **representation,
- fs_fs_t *fs,
- svn_stringbuf_t *file_content,
- svn_string_t *value,
- revision_info_t *revision_info,
- apr_pool_t *pool,
- apr_pool_t *scratch_pool)
-{
- representation_t *result;
- int revision;
-
- apr_uint64_t offset;
- apr_uint64_t size;
- int idx;
-
- /* read location (revision, offset) and size */
- char *c = (char *)value->data;
- SVN_ERR(svn_cstring_atoi(&revision, svn_cstring_tokenize(" ", &c)));
- SVN_ERR(svn_cstring_strtoui64(&offset, svn_cstring_tokenize(" ", &c), 0, APR_SIZE_MAX, 10));
- SVN_ERR(svn_cstring_strtoui64(&size, svn_cstring_tokenize(" ", &c), 0, APR_SIZE_MAX, 10));
-
- /* look it up */
- result = find_representation(&idx, fs, &revision_info, revision, (apr_size_t)offset);
- if (!result)
- {
- /* not parsed, yet (probably a rep in the same revision).
- * Create a new rep object and determine its base rep as well.
- */
- result = apr_pcalloc(pool, sizeof(*result));
- result->revision = revision_info;
- result->original.offset = (apr_size_t)offset;
- result->original.size = (apr_size_t)size;
- SVN_ERR(read_rep_base(&result->delta_base, &result->header_size,
- &result->is_plain, fs, file_content,
- (apr_size_t)offset + revision_info->original.offset,
- scratch_pool));
-
- svn_sort__array_insert(&result, revision_info->representations, idx);
- }
-
- *representation = result;
-
- return SVN_NO_ERROR;
-}
-
-/* Read the delta window contents of all windows in REPRESENTATION in FS.
- * Return the data as svn_txdelta_window_t* instances in *WINDOWS.
- * Use POOL for allocations.
- */
-static svn_error_t *
-read_windows(apr_array_header_t **windows,
- fs_fs_t *fs,
- representation_t *representation,
- apr_pool_t *pool)
-{
- svn_string_t *content;
- svn_string_t data;
- svn_stream_t *stream;
- apr_size_t offset = representation->original.offset
- + representation->header_size;
- char version;
- apr_size_t len = sizeof(version);
-
- *windows = apr_array_make(pool, 0, sizeof(svn_txdelta_window_t *));
-
- /* get the whole revision content */
- SVN_ERR(get_content(&content, fs, representation->revision->revision, pool));
-
- /* create a read stream and position it directly after the rep header */
- data.data = content->data + offset + 3;
- data.len = representation->original.size - 3;
- stream = svn_stream_from_string(&data, pool);
- SVN_ERR(svn_stream_read(stream, &version, &len));
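-  /* Editor's note: svndiff data begins with the magic bytes "SVN"
-   * followed by a one-byte format version; the "+ 3" above skips the
-   * magic, and the read just fetched that version byte. */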
-
- /* read the windows from that stream */
- while (TRUE)
- {
- svn_txdelta_window_t *window;
- svn_stream_mark_t *mark;
- char dummy;
-
- len = sizeof(dummy);
- SVN_ERR(svn_stream_mark(stream, &mark, pool));
- SVN_ERR(svn_stream_read(stream, &dummy, &len));
- if (len == 0)
- break;
-
- SVN_ERR(svn_stream_seek(stream, mark));
- SVN_ERR(svn_txdelta_read_svndiff_window(&window, stream, version, pool));
- APR_ARRAY_PUSH(*windows, svn_txdelta_window_t *) = window;
- }
-
- return SVN_NO_ERROR;
-}
-
-/* Read the content of the PLAIN REPRESENTATION in FS and return it in
- * *CONTENT. Use POOL for allocations.
- */
-static svn_error_t *
-read_plain(svn_stringbuf_t **content,
- fs_fs_t *fs,
- representation_t *representation,
- apr_pool_t *pool)
-{
- svn_string_t *data;
- apr_size_t offset = representation->original.offset
- + representation->header_size;
-
- SVN_ERR(get_content(&data, fs, representation->revision->revision, pool));
-
- /* content is stored as fulltext already */
- *content = svn_stringbuf_ncreate(data->data + offset,
- representation->original.size,
- pool);
-
- return SVN_NO_ERROR;
-}
-
-/* Get the undeltified representation that is a result of combining all
- * deltas from the current desired REPRESENTATION in FS with its base
- * representation. Store the result in *CONTENT.
- * Use POOL for allocations. */
-static svn_error_t *
-get_combined_window(svn_stringbuf_t **content,
- fs_fs_t *fs,
- representation_t *representation,
- apr_pool_t *pool)
-{
- int i;
- apr_array_header_t *windows;
- svn_stringbuf_t *base_content, *result;
- const char *source;
- apr_pool_t *sub_pool;
- apr_pool_t *iter_pool;
-
- /* special case: no un-deltification necessary */
- if (representation->is_plain)
- return read_plain(content, fs, representation, pool);
-
- /* special case: data already in cache */
- *content = get_cached_window(fs, representation, pool);
- if (*content)
- return SVN_NO_ERROR;
-
- /* read the delta windows for this representation */
- sub_pool = svn_pool_create(pool);
- iter_pool = svn_pool_create(pool);
- SVN_ERR(read_windows(&windows, fs, representation, sub_pool));
-
-  /* fetch the base content, or create one */
- if (representation->delta_base && representation->delta_base->revision)
- SVN_ERR(get_combined_window(&base_content, fs,
- representation->delta_base, sub_pool));
- else
- base_content = svn_stringbuf_create_empty(sub_pool);
-
- /* apply deltas */
- result = svn_stringbuf_create_empty(pool);
- source = base_content->data;
-
- for (i = 0; i < windows->nelts; ++i)
- {
- svn_txdelta_window_t *window
- = APR_ARRAY_IDX(windows, i, svn_txdelta_window_t *);
- svn_stringbuf_t *buf
- = svn_stringbuf_create_ensure(window->tview_len, iter_pool);
-
- buf->len = window->tview_len;
- svn_txdelta_apply_instructions(window, window->src_ops ? source : NULL,
- buf->data, &buf->len);
-
- svn_stringbuf_appendbytes(result, buf->data, buf->len);
- source += window->sview_len;
-
- svn_pool_clear(iter_pool);
- }
-
- svn_pool_destroy(iter_pool);
- svn_pool_destroy(sub_pool);
-
- /* cache result and return it */
- set_cached_window(fs, representation, result);
- *content = result;
-
- return SVN_NO_ERROR;
-}
-
-/* forward declaration */
-static svn_error_t *
-read_noderev(noderev_t **noderev,
- fs_fs_t *fs,
- svn_stringbuf_t *file_content,
- apr_size_t offset,
- revision_info_t *revision_info,
- apr_pool_t *pool,
- apr_pool_t *scratch_pool);
-
-/* Get the noderev at OFFSET in FILE_CONTENT in FS. The file content must
- * pertain to the revision given in REVISION_INFO. If the data has not
- * been read yet, parse it and store it in REVISION_INFO. Return the result
- * in *NODEREV.
- *
- * Use POOL for allocations and SCRATCH_POOL for temporaries.
- */
-static svn_error_t *
-get_noderev(noderev_t **noderev,
- fs_fs_t *fs,
- svn_stringbuf_t *file_content,
- apr_size_t offset,
- revision_info_t *revision_info,
- apr_pool_t *pool,
- apr_pool_t *scratch_pool)
-{
- int idx = svn_sort__bsearch_lower_bound(&offset,
- revision_info->node_revs,
- compare_noderev_offsets);
- if ((idx < 0) || (idx >= revision_info->node_revs->nelts))
- SVN_ERR(read_noderev(noderev, fs, file_content, offset, revision_info,
- pool, scratch_pool));
- else
- {
- *noderev = APR_ARRAY_IDX(revision_info->node_revs, idx, noderev_t *);
- if ((*noderev)->original.offset != offset)
- SVN_ERR(read_noderev(noderev, fs, file_content, offset, revision_info,
- pool, scratch_pool));
- }
-
- return SVN_NO_ERROR;
-}
-
-/* Read the directory stored in REPRESENTATION in FS into *HASH. The result
- * will be allocated in FS' directory cache and will be a plain key-value
- * hash. Use SCRATCH_POOL for temporary allocations.
- */
-static svn_error_t *
-read_dir(apr_hash_t **hash,
- fs_fs_t *fs,
- representation_t *representation,
- apr_pool_t *scratch_pool)
-{
- svn_stringbuf_t *text;
- apr_pool_t *text_pool;
- svn_stream_t *stream;
- apr_pool_t *pool;
-
- /* chances are, we find the info in cache already */
- *hash = get_cached_dir(fs, representation);
- if (*hash)
- return SVN_NO_ERROR;
-
- /* create the result container */
- pool = get_cached_dir_pool(fs);
- *hash = svn_hash__make(pool);
-
- /* if this is a non-empty rep, read it and de-serialize the hash */
- if (representation != NULL)
- {
- text_pool = svn_pool_create(scratch_pool);
- SVN_ERR(get_combined_window(&text, fs, representation, text_pool));
- stream = svn_stream_from_stringbuf(text, text_pool);
- SVN_ERR(svn_hash_read2(*hash, stream, SVN_HASH_TERMINATOR, pool));
- svn_pool_destroy(text_pool);
- }
-
- /* cache the result */
- set_cached_dir(fs, representation, *hash);
-
- return SVN_NO_ERROR;
-}
-
-/* Starting at the directory in REPRESENTATION in FILE_CONTENT, read all
- * DAG nodes, directories and representations linked in that tree structure.
- * Store them in FS and read them only once.
- *
- * Use POOL for persistent allocations and SCRATCH_POOL for temporaries.
- */
-static svn_error_t *
-parse_dir(fs_fs_t *fs,
- svn_stringbuf_t *file_content,
- representation_t *representation,
- apr_pool_t *pool,
- apr_pool_t *scratch_pool)
-{
- apr_hash_t *hash;
- apr_hash_index_t *hi;
- apr_pool_t *iter_pool = svn_pool_create(scratch_pool);
- apr_hash_t *base_dir = svn_hash__make(scratch_pool);
-
- /* special case: empty dir rep */
- if (representation == NULL)
- return SVN_NO_ERROR;
-
- /* if we have a previous representation of that dir, hash it by name */
- if (representation->delta_base && representation->delta_base->dir)
- {
- apr_array_header_t *dir = representation->delta_base->dir->entries;
- int i;
-
- for (i = 0; i < dir->nelts; ++i)
- {
- direntry_t *entry = APR_ARRAY_IDX(dir, i, direntry_t *);
- apr_hash_set(base_dir, entry->name, entry->name_len, entry);
- }
- }
-
- /* read this directory */
- SVN_ERR(read_dir(&hash, fs, representation, scratch_pool));
-
- /* add it as an array to the representation (entries yet to be filled) */
- representation->dir = apr_pcalloc(pool, sizeof(*representation->dir));
- representation->dir->entries
- = apr_array_make(pool, apr_hash_count(hash), sizeof(direntry_t *));
-
- /* Translate the string dir entries into real entries. Reuse existing
- * objects as much as possible to keep memory consumption low.
- */
- for (hi = apr_hash_first(pool, hash); hi; hi = apr_hash_next(hi))
- {
- const char *name = svn__apr_hash_index_key(hi);
- svn_string_t *str_val = svn__apr_hash_index_val(hi);
- apr_size_t offset;
- revision_info_t *revision_info;
-
- /* look for corresponding entry in previous version */
- apr_size_t name_len = strlen(name);
- direntry_t *entry = base_dir
- ? apr_hash_get(base_dir, name, name_len)
- : NULL;
-
- /* parse the new target revnode ID (revision, offset) */
- SVN_ERR(parse_revnode_pos(&revision_info, &offset, fs, str_val));
-
- /* if this is a new entry or if the content changed, create a new
- * instance for it. */
- if ( !entry
- || !entry->node->text
- || entry->node->text->revision != revision_info
- || entry->node->original.offset != offset)
- {
- /* create & init the new entry. Reuse the name string if possible */
- direntry_t *new_entry = apr_pcalloc(pool, sizeof(*entry));
- new_entry->name_len = name_len;
- if (entry)
- new_entry->name = entry->name;
- else
- new_entry->name = apr_pstrdup(pool, name);
-
- /* Link it to the content noderev. Recurse. */
- entry = new_entry;
- SVN_ERR(get_noderev(&entry->node, fs, file_content, offset,
- revision_info, pool, iter_pool));
- }
-
- /* set the directory entry */
- APR_ARRAY_PUSH(representation->dir->entries, direntry_t *) = entry;
- svn_pool_clear(iter_pool);
- }
-
- svn_pool_destroy(iter_pool);
- return SVN_NO_ERROR;
-}
-
-/* Starting at the noderev at OFFSET in FILE_CONTENT, read all DAG nodes,
- * directories and representations linked in that tree structure. Store
- * them in FS and read them only once. Return the result in *NODEREV.
- *
- * Use POOL for persistent allocations and SCRATCH_POOL for temporaries.
- */
-static svn_error_t *
-read_noderev(noderev_t **noderev,
- fs_fs_t *fs,
- svn_stringbuf_t *file_content,
- apr_size_t offset,
- revision_info_t *revision_info,
- apr_pool_t *pool,
- apr_pool_t *scratch_pool)
-{
- noderev_t *result = apr_pcalloc(pool, sizeof(*result));
- svn_string_t *line;
- svn_boolean_t is_dir = FALSE;
-
- scratch_pool = svn_pool_create(scratch_pool);
-
- /* parse the noderev line-by-line until we find an empty line */
- result->original.offset = offset;
- while (1)
- {
- /* for this line, extract key and value. Ignore invalid values */
- svn_string_t key;
- svn_string_t value;
- char *sep;
- const char *start = file_content->data + offset
- + revision_info->original.offset;
- const char *end = strchr(start, '\n');
-
- line = svn_string_ncreate(start, end - start, scratch_pool);
- offset += end - start + 1;
-
- /* empty line -> end of noderev data */
- if (line->len == 0)
- break;
-
- sep = strchr(line->data, ':');
- if (sep == NULL)
- continue;
-
- key.data = line->data;
- key.len = sep - key.data;
- *sep = 0;
-
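-      /* key and value are separated by ": ", hence the "+ 2" below */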
- if (key.len + 2 > line->len)
- continue;
-
- value.data = sep + 2;
- value.len = line->len - (key.len + 2);
-
- /* translate (key, value) into noderev elements */
- if (key_matches(&key, "type"))
- is_dir = strcmp(value.data, "dir") == 0;
- else if (key_matches(&key, "pred"))
- SVN_ERR(parse_pred(&result->predecessor, fs, &value));
- else if (key_matches(&key, "text"))
- SVN_ERR(parse_representation(&result->text, fs, file_content,
- &value, revision_info,
- pool, scratch_pool));
- else if (key_matches(&key, "props"))
- SVN_ERR(parse_representation(&result->props, fs, file_content,
- &value, revision_info,
- pool, scratch_pool));
- }
-
- /* link noderev to revision info */
- result->revision = revision_info;
- result->original.size = offset - result->original.offset;
-
- svn_sort__array_insert(&result,
- revision_info->node_revs,
- svn_sort__bsearch_lower_bound(&offset,
- revision_info->node_revs,
- compare_noderev_offsets));
-
- /* if this is a directory, read and process that recursively */
- if (is_dir)
- SVN_ERR(parse_dir(fs, file_content, result->text,
- pool, scratch_pool));
-
- /* done */
- svn_pool_destroy(scratch_pool);
- *noderev = result;
-
- return SVN_NO_ERROR;
-}
-
-/* Simple utility to print a REVISION number and make it appear immediately.
- */
-static void
-print_progress(svn_revnum_t revision)
-{
- printf("%8ld", revision);
- fflush(stdout);
-}
-
-/* Read the content of the pack file starting at revision BASE and store it
- * in FS. Use POOL for allocations.
- */
-static svn_error_t *
-read_pack_file(fs_fs_t *fs,
- svn_revnum_t base,
- apr_pool_t *pool)
-{
- apr_array_header_t *manifest = NULL;
- apr_pool_t *local_pool = svn_pool_create(pool);
- apr_pool_t *iter_pool = svn_pool_create(local_pool);
- int i;
- svn_stringbuf_t *file_content;
- revision_pack_t *revisions;
- const char *pack_folder = get_pack_folder(fs, base, local_pool);
-
- /* read the whole pack file into memory */
- SVN_ERR(read_rev_or_pack_file(&file_content, fs, base, local_pool));
-
- /* create the revision container */
- revisions = apr_pcalloc(pool, sizeof(*revisions));
- revisions->base = base;
- revisions->fragments = NULL;
- revisions->info = apr_array_make(pool,
- fs->max_files_per_dir,
- sizeof(revision_info_t*));
- revisions->filesize = file_content->len;
- APR_ARRAY_PUSH(fs->packs, revision_pack_t*) = revisions;
-
- /* parse the manifest file */
- SVN_ERR(read_manifest(&manifest, fs, pack_folder, local_pool));
- if (manifest->nelts != fs->max_files_per_dir)
- return svn_error_create(SVN_ERR_FS_CORRUPT, NULL, NULL);
-
- /* process each revision in the pack file */
- for (i = 0; i < manifest->nelts; ++i)
- {
- apr_size_t root_node_offset;
- svn_string_t rev_content;
-
- /* create the revision info for the current rev */
- revision_info_t *info = apr_pcalloc(pool, sizeof(*info));
- info->node_revs = apr_array_make(iter_pool, 4, sizeof(noderev_t*));
- info->representations = apr_array_make(iter_pool, 4, sizeof(representation_t*));
-
- info->revision = base + i;
- info->original.offset = APR_ARRAY_IDX(manifest, i, apr_size_t);
- info->original.end = i+1 < manifest->nelts
- ? APR_ARRAY_IDX(manifest, i+1 , apr_size_t)
- : file_content->len;
- SVN_ERR(read_revision_header(&info->original.changes,
- &info->original.changes_len,
- &root_node_offset,
- file_content,
- APR_ARRAY_IDX(manifest, i , apr_size_t),
- info->original.end,
- iter_pool));
-
- /* put it into our containers */
- APR_ARRAY_PUSH(revisions->info, revision_info_t*) = info;
- APR_ARRAY_PUSH(fs->revisions, revision_info_t*) = info;
-
- /* cache the revision content */
- rev_content.data = file_content->data + info->original.offset;
- rev_content.len = info->original.end - info->original.offset;
- set_cached_content(fs->cache, info->revision, &rev_content);
-
- /* parse the revision content recursively. */
- SVN_ERR(read_noderev(&info->root_noderev, fs, file_content,
- root_node_offset, info, pool, iter_pool));
-
- /* copy dynamically grown containers from temp into result pool */
- info->node_revs = apr_array_copy(pool, info->node_revs);
- info->representations = apr_array_copy(pool, info->representations);
-
- /* destroy temps */
- svn_pool_clear(iter_pool);
- }
-
- /* one more pack file processed */
- print_progress(base);
- svn_pool_destroy(local_pool);
-
- return SVN_NO_ERROR;
-}
-
-/* Read the content of the REVISION file and store it in FS.
- * Use POOL for allocations.
- */
-static svn_error_t *
-read_revision_file(fs_fs_t *fs,
- svn_revnum_t revision,
- apr_pool_t *pool)
-{
- apr_size_t root_node_offset;
- apr_pool_t *local_pool = svn_pool_create(pool);
- svn_stringbuf_t *file_content;
- svn_string_t rev_content;
- revision_pack_t *revisions = apr_pcalloc(pool, sizeof(*revisions));
- revision_info_t *info = apr_pcalloc(pool, sizeof(*info));
-
-  /* read the whole revision file into memory */
- SVN_ERR(read_rev_or_pack_file(&file_content, fs, revision, local_pool));
-
- /* create the revision info for the current rev */
- info->node_revs = apr_array_make(pool, 4, sizeof(noderev_t*));
- info->representations = apr_array_make(pool, 4, sizeof(representation_t*));
-
- info->revision = revision;
- info->original.offset = 0;
- info->original.end = file_content->len;
- SVN_ERR(read_revision_header(&info->original.changes,
- &info->original.changes_len,
- &root_node_offset,
- file_content,
- 0,
- info->original.end,
- local_pool));
-
- /* put it into our containers */
- APR_ARRAY_PUSH(fs->revisions, revision_info_t*) = info;
-
- /* create a pseudo-pack file container for just this rev to keep our
- * data structures as uniform as possible.
- */
- revisions->base = revision;
- revisions->fragments = NULL;
- revisions->info = apr_array_make(pool, 1, sizeof(revision_info_t*));
- revisions->filesize = file_content->len;
- APR_ARRAY_PUSH(revisions->info, revision_info_t*) = info;
- APR_ARRAY_PUSH(fs->packs, revision_pack_t*) = revisions;
-
- /* cache the revision content */
- rev_content.data = file_content->data + info->original.offset;
- rev_content.len = info->original.end - info->original.offset;
- set_cached_content(fs->cache, info->revision, &rev_content);
-
- /* parse the revision content recursively. */
- SVN_ERR(read_noderev(&info->root_noderev, fs, file_content,
- root_node_offset, info,
- pool, local_pool));
- APR_ARRAY_PUSH(info->node_revs, noderev_t*) = info->root_noderev;
-
- /* show progress every 1000 revs or so */
- if (revision % fs->max_files_per_dir == 0)
- print_progress(revision);
-
- svn_pool_destroy(local_pool);
-
- return SVN_NO_ERROR;
-}
-
-/* Read the repository at PATH beginning with revision START_REVISION and
- * return the result in *FS. Allocate caches with MEMSIZE bytes total
- * capacity. Use POOL for non-cache allocations.
- */
-static svn_error_t *
-read_revisions(fs_fs_t **fs,
- const char *path,
- svn_revnum_t start_revision,
- apr_size_t memsize,
- apr_pool_t *pool)
-{
- svn_revnum_t revision;
- apr_size_t content_cache_size;
- apr_size_t window_cache_size;
- apr_size_t dir_cache_size;
-
- /* determine cache sizes */
- if (memsize < 100)
- memsize = 100;
-
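-  /* rough heuristic: ~70% of MEMSIZE (in MB, capped at 4000) goes to
-   * revision content, ~20% to delta windows and ~10% to the dir cache */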
- content_cache_size = memsize * 7 / 10 > 4000 ? 4000 : memsize * 7 / 10;
- window_cache_size = memsize * 2 / 10 * 1024 * 1024;
- dir_cache_size = (memsize / 10) * 16000;
-
- /* read repo format and such */
- SVN_ERR(fs_open(fs, path, pool));
-
- /* create data containers and caches */
- (*fs)->start_revision = start_revision
- - (start_revision % (*fs)->max_files_per_dir);
- (*fs)->revisions = apr_array_make(pool,
- (*fs)->max_revision + 1 - (*fs)->start_revision,
- sizeof(revision_info_t *));
- (*fs)->packs = apr_array_make(pool,
- ((*fs)->min_unpacked_rev - (*fs)->start_revision)
- / (*fs)->max_files_per_dir,
- sizeof(revision_pack_t *));
- (*fs)->null_base = apr_pcalloc(pool, sizeof(*(*fs)->null_base));
- (*fs)->cache = create_content_cache
- (apr_allocator_owner_get
- (svn_pool_create_allocator(FALSE)),
- content_cache_size * 1024 * 1024);
- (*fs)->dir_cache = create_dir_cache
- (apr_allocator_owner_get
- (svn_pool_create_allocator(FALSE)),
- dir_cache_size);
- (*fs)->window_cache = create_window_cache
- (apr_allocator_owner_get
- (svn_pool_create_allocator(FALSE)),
- 10000, window_cache_size);
-
- /* read all packed revs */
- for ( revision = start_revision
- ; revision < (*fs)->min_unpacked_rev
- ; revision += (*fs)->max_files_per_dir)
- SVN_ERR(read_pack_file(*fs, revision, pool));
-
- /* read non-packed revs */
- for ( ; revision <= (*fs)->max_revision; ++revision)
- SVN_ERR(read_revision_file(*fs, revision, pool));
-
- return SVN_NO_ERROR;
-}
-
-/* Return the maximum number of decimal digits required to represent offsets
- * in the given PACK file.
- */
-static apr_size_t
-get_max_offset_len(const revision_pack_t *pack)
-{
- /* the pack files may grow a few percent.
-   * Fudge it up to be on the safe side.
- */
- apr_size_t max_future_size = pack->filesize * 2 + 10000;
- apr_size_t result = 0;
-
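-  /* count the decimal digits needed for MAX_FUTURE_SIZE */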
- while (max_future_size > 0)
- {
- ++result;
- max_future_size /= 10;
- }
-
- return result;
-}
-
-/* Create the fragments container in PACK and add revision header fragments
- * to it. Use POOL for allocations.
- */
-static svn_error_t *
-add_revisions_pack_heads(revision_pack_t *pack,
- apr_pool_t *pool)
-{
- int i;
- revision_info_t *info;
- apr_size_t offset_len = get_max_offset_len(pack);
- fragment_t fragment;
-
- /* allocate fragment arrays */
-
- int fragment_count = 1;
- for (i = 0; i < pack->info->nelts; ++i)
- {
- info = APR_ARRAY_IDX(pack->info, i, revision_info_t*);
- fragment_count += info->node_revs->nelts
- + info->representations->nelts
- + 2;
- }
-
- pack->target_offset = pack->info->nelts > 1 ? 64 : 0;
- pack->fragments = apr_array_make(pool,
- fragment_count,
- sizeof(fragment_t));
-
- /* put revision headers first */
-
- for (i = 0; i < pack->info->nelts - 1; ++i)
- {
- info = APR_ARRAY_IDX(pack->info, i, revision_info_t*);
- info->target.offset = pack->target_offset;
-
- fragment.data = info;
- fragment.kind = header_fragment;
- fragment.position = pack->target_offset;
- APR_ARRAY_PUSH(pack->fragments, fragment_t) = fragment;
-
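-      /* reserve room for the header "\n<root offset> <changes offset>\n",
-       * i.e. at most 2 * offset_len + 3 bytes */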
- pack->target_offset += 2 * offset_len + 3;
- }
-
- info = APR_ARRAY_IDX(pack->info, pack->info->nelts - 1, revision_info_t*);
- info->target.offset = pack->target_offset;
-
- /* followed by the changes list */
-
- for (i = 0; i < pack->info->nelts; ++i)
- {
- info = APR_ARRAY_IDX(pack->info, i, revision_info_t*);
-
- info->target.changes = pack->target_offset - info->target.offset;
- info->target.changes_len = info->original.changes_len;
-
- fragment.data = info;
- fragment.kind = changes_fragment;
- fragment.position = pack->target_offset;
- APR_ARRAY_PUSH(pack->fragments, fragment_t) = fragment;
-
- pack->target_offset += info->original.changes_len;
- }
-
- return SVN_NO_ERROR;
-}
-
-/* For the revision given by INFO in FS, return the fragment container in
- * *FRAGMENTS and the current placement offset in *CURRENT_POS.
- */
-static svn_error_t *
-get_target_offset(apr_size_t **current_pos,
- apr_array_header_t **fragments,
- fs_fs_t *fs,
- revision_info_t *info)
-{
- int i;
- revision_pack_t *pack;
- svn_revnum_t revision = info->revision;
-
- /* identify the pack object */
- if (fs->min_unpacked_rev > revision)
- {
- i = (revision - fs->start_revision) / fs->max_files_per_dir;
- }
- else
- {
- i = (fs->min_unpacked_rev - fs->start_revision) / fs->max_files_per_dir;
- i += revision - fs->min_unpacked_rev;
- }
-
- /* extract the desired info from it */
- pack = APR_ARRAY_IDX(fs->packs, i, revision_pack_t*);
- *current_pos = &pack->target_offset;
- *fragments = pack->fragments;
-
- return SVN_NO_ERROR;
-}
-
-/* forward declaration */
-static svn_error_t *
-add_noderev_recursively(fs_fs_t *fs,
- noderev_t *node,
- apr_pool_t *pool);
-
-/* Place fragments for the given REPRESENTATION of the given KIND, iff it
- * has not been covered yet. Place the base reps along the deltification
- * chain as far as those reps have not been covered yet. If REPRESENTATION
- * is a directory, recursively place its elements.
- *
- * Use POOL for allocations.
- */
-static svn_error_t *
-add_representation_recursively(fs_fs_t *fs,
- representation_t *representation,
- enum fragment_kind_t kind,
- apr_pool_t *pool)
-{
- apr_size_t *current_pos;
- apr_array_header_t *fragments;
- fragment_t fragment;
-
- /* place REPRESENTATION only once and only if it exists and will not
- * be covered later as a directory. */
- if ( representation == NULL
- || representation->covered
- || (representation->dir && kind != dir_fragment)
- || representation == fs->null_base)
- return SVN_NO_ERROR;
-
- /* add and place a fragment for REPRESENTATION */
- SVN_ERR(get_target_offset(&current_pos, &fragments,
- fs, representation->revision));
- representation->target.offset = *current_pos;
- representation->covered = TRUE;
-
- fragment.data = representation;
- fragment.kind = kind;
- fragment.position = *current_pos;
- APR_ARRAY_PUSH(fragments, fragment_t) = fragment;
-
- /* determine the size of data to be added to the target file */
- if ( kind != dir_fragment
- && representation->delta_base && representation->delta_base->dir)
- {
- /* base rep is a dir -> would change -> need to store it as fulltext
- * in our target file */
- apr_pool_t *text_pool = svn_pool_create(pool);
- svn_stringbuf_t *content;
-
- SVN_ERR(get_combined_window(&content, fs, representation, text_pool));
- representation->target.size = content->len;
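-      /* the extra 13 bytes are the "PLAIN\n" and "ENDREP\n" wrappers */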
- *current_pos += representation->target.size + 13;
-
- svn_pool_destroy(text_pool);
- }
- else
- if ( kind == dir_fragment
- || (representation->delta_base && representation->delta_base->dir))
- {
- /* deltified directories may grow considerably */
- if (representation->original.size < 50)
- *current_pos += 300;
- else
- *current_pos += representation->original.size * 3 + 150;
- }
- else
- {
- /* plain / deltified content will not change but the header may
- * grow slightly due to larger offsets. */
- representation->target.size = representation->original.size;
-
- if (representation->delta_base &&
- (representation->delta_base != fs->null_base))
- *current_pos += representation->original.size + 50;
- else
- *current_pos += representation->original.size + 13;
- }
-
- /* follow the delta chain and place base revs immediately after this */
- if (representation->delta_base)
- SVN_ERR(add_representation_recursively(fs,
- representation->delta_base,
- kind,
- pool));
-
- /* finally, recurse into directories */
- if (representation->dir)
- {
- int i;
- apr_array_header_t *entries = representation->dir->entries;
-
- for (i = 0; i < entries->nelts; ++i)
- {
- direntry_t *entry = APR_ARRAY_IDX(entries, i, direntry_t *);
- if (entry->node)
- SVN_ERR(add_noderev_recursively(fs, entry->node, pool));
- }
- }
-
- return SVN_NO_ERROR;
-}
-
-/* Place fragments for the given NODE in FS, iff it has not been covered
- * yet. Place the reps (text, props) immediately after the node.
- *
- * Use POOL for allocations.
- */
-static svn_error_t *
-add_noderev_recursively(fs_fs_t *fs,
- noderev_t *node,
- apr_pool_t *pool)
-{
- apr_size_t *current_pos;
- apr_array_header_t *fragments;
- fragment_t fragment;
-
- /* don't add it twice */
- if (node->covered)
- return SVN_NO_ERROR;
-
- /* add and place a fragment for NODE */
- SVN_ERR(get_target_offset(&current_pos, &fragments, fs, node->revision));
- node->covered = TRUE;
- node->target.offset = *current_pos;
-
- fragment.data = node;
- fragment.kind = noderev_fragment;
- fragment.position = *current_pos;
- APR_ARRAY_PUSH(fragments, fragment_t) = fragment;
-
- /* size may slightly increase */
- *current_pos += node->original.size + 40;
-
- /* recurse into representations */
- if (node->text && node->text->dir)
- SVN_ERR(add_representation_recursively(fs, node->text, dir_fragment, pool));
- else
- SVN_ERR(add_representation_recursively(fs, node->text, file_fragment, pool));
-
- SVN_ERR(add_representation_recursively(fs, node->props, property_fragment, pool));
-
- return SVN_NO_ERROR;
-}
-
-/* Place a fragment for the last revision in PACK. Use POOL for allocations.
- */
-static svn_error_t *
-add_revisions_pack_tail(revision_pack_t *pack,
- apr_pool_t *pool)
-{
- int i;
- revision_info_t *info;
- apr_size_t offset_len = get_max_offset_len(pack);
- fragment_t fragment;
-
- /* put final revision header last and fix up revision lengths */
-
- info = APR_ARRAY_IDX(pack->info, pack->info->nelts-1, revision_info_t*);
-
- fragment.data = info;
- fragment.kind = header_fragment;
- fragment.position = pack->target_offset;
- APR_ARRAY_PUSH(pack->fragments, fragment_t) = fragment;
-
- pack->target_offset += 2 * offset_len + 3;
-
- /* end of target file reached. Store that info in all revs. */
- for (i = 0; i < pack->info->nelts; ++i)
- {
- info = APR_ARRAY_IDX(pack->info, i, revision_info_t*);
- info->target.end = pack->target_offset;
- }
-
- return SVN_NO_ERROR;
-}
-
-/* Place all fragments for all revisions / packs in FS.
- * Use POOL for allocations.
- */
-static svn_error_t *
-reorder_revisions(fs_fs_t *fs,
- apr_pool_t *pool)
-{
- int i, k;
-
- /* headers and changes */
-
- for (i = 0; i < fs->packs->nelts; ++i)
- {
- revision_pack_t *pack = APR_ARRAY_IDX(fs->packs, i, revision_pack_t*);
- SVN_ERR(add_revisions_pack_heads(pack, pool));
- }
-
- /* representations & nodes */
-
- for (i = fs->revisions->nelts-1; i >= 0; --i)
- {
- revision_info_t *info = APR_ARRAY_IDX(fs->revisions, i, revision_info_t*);
- for (k = info->node_revs->nelts - 1; k >= 0; --k)
- {
- noderev_t *node = APR_ARRAY_IDX(info->node_revs, k, noderev_t*);
- SVN_ERR(add_noderev_recursively(fs, node, pool));
- }
-
- if (info->revision % fs->max_files_per_dir == 0)
- print_progress(info->revision);
- }
-
- /* pack file tails */
-
- for (i = 0; i < fs->packs->nelts; ++i)
- {
- revision_pack_t *pack = APR_ARRAY_IDX(fs->packs, i, revision_pack_t*);
- SVN_ERR(add_revisions_pack_tail(pack, pool));
- }
-
- return SVN_NO_ERROR;
-}
-
-/* forward declaration */
-static svn_error_t *
-get_fragment_content(svn_string_t **content,
- fs_fs_t *fs,
- fragment_t *fragment,
- apr_pool_t *pool);
-
-/* Directory content may change and, with it, the deltified representations
- * may change significantly. This function causes all directory target reps
- * in PACK of FS to be built and their new MD5 checksums as well as rep
- * sizes to be updated. We must do that before attempting to write noderevs.
- *
- * Use POOL for allocations.
- */
-static svn_error_t *
-update_noderevs(fs_fs_t *fs,
- revision_pack_t *pack,
- apr_pool_t *pool)
-{
- int i;
- apr_pool_t *itempool = svn_pool_create(pool);
-
- for (i = 0; i < pack->fragments->nelts; ++i)
- {
- fragment_t *fragment = &APR_ARRAY_IDX(pack->fragments, i, fragment_t);
- if (fragment->kind == dir_fragment)
- {
- svn_string_t *content;
-
- /* request updated rep content but ignore the result.
- * We are only interested in the MD5, content and rep size updates. */
- SVN_ERR(get_fragment_content(&content, fs, fragment, itempool));
- svn_pool_clear(itempool);
- }
- }
-
- svn_pool_destroy(itempool);
-
- return SVN_NO_ERROR;
-}
-
-/* Determine the target size of the FRAGMENT in FS and return the value
- * in *LENGTH. If ADD_PADDING has been set, slightly fudge the numbers
- * to account for changes in offset lengths etc. Use POOL for temporary
- * allocations.
- */
-static svn_error_t *
-get_content_length(apr_size_t *length,
- fs_fs_t *fs,
- fragment_t *fragment,
- svn_boolean_t add_padding,
- apr_pool_t *pool)
-{
- svn_string_t *content;
-
- SVN_ERR(get_fragment_content(&content, fs, fragment, pool));
- if (add_padding)
- switch (fragment->kind)
- {
- case dir_fragment:
- *length = content->len + 16;
- break;
- case noderev_fragment:
- *length = content->len + 3;
- break;
- default:
- *length = content->len;
- break;
- }
- else
- *length = content->len;
-
- return SVN_NO_ERROR;
-}
-
-/* Move the FRAGMENT to global file offset NEW_POSITION. Update the target
- * location info of the underlying object as well.
- */
-static void
-move_fragment(fragment_t *fragment,
- apr_size_t new_position)
-{
- revision_info_t *info;
- representation_t *representation;
- noderev_t *node;
-
- /* move the fragment */
- fragment->position = new_position;
-
- /* move the underlying object */
- switch (fragment->kind)
- {
- case header_fragment:
- info = fragment->data;
- info->target.offset = new_position;
- break;
-
- case changes_fragment:
- info = fragment->data;
- info->target.changes = new_position - info->target.offset;
- break;
-
- case property_fragment:
- case file_fragment:
- case dir_fragment:
- representation = fragment->data;
- representation->target.offset = new_position;
- break;
-
- case noderev_fragment:
- node = fragment->data;
- node->target.offset = new_position;
- break;
- }
-}
-
-/* Move the fragments in PACK's target fragment list to their final offsets.
- * This may require several iterations if the fudge factors turned out to
- * be insufficient. Use POOL for allocations.
- */
-static svn_error_t *
-pack_revisions(fs_fs_t *fs,
- revision_pack_t *pack,
- apr_pool_t *pool)
-{
- int i;
- fragment_t *fragment, *next;
- svn_boolean_t needed_to_expand;
- revision_info_t *info;
- apr_size_t current_pos, len, old_len;
-
- apr_pool_t *itempool = svn_pool_create(pool);
-
- /* update all directory reps. Chances are that most of the target rep
- * sizes are now close to accurate. */
- SVN_ERR(update_noderevs(fs, pack, pool));
-
- /* compression phase: pack all fragments tightly with only a very small
-   * fudge factor. This should cause offsets to shrink, thus the actual
-   * fragment sizes should tend to be even smaller afterwards. */
- current_pos = pack->info->nelts > 1 ? 64 : 0;
- for (i = 0; i + 1 < pack->fragments->nelts; ++i)
- {
- fragment = &APR_ARRAY_IDX(pack->fragments, i, fragment_t);
- SVN_ERR(get_content_length(&len, fs, fragment, TRUE, itempool));
- move_fragment(fragment, current_pos);
- current_pos += len;
-
- svn_pool_clear(itempool);
- }
-
- /* don't forget the final fragment (last revision's revision header) */
- fragment = &APR_ARRAY_IDX(pack->fragments, pack->fragments->nelts-1, fragment_t);
- fragment->position = current_pos;
-
- /* expansion phase: check whether all fragments fit into their allotted
- * slots. Grow them geometrically if they don't fit. Retry until they
- * all do fit.
- * Note: there is an upper limit to which fragments can grow. So, this
- * loop will terminate. Often, no expansion will be necessary at all. */
- do
- {
- needed_to_expand = FALSE;
- current_pos = pack->info->nelts > 1 ? 64 : 0;
-
- for (i = 0; i + 1 < pack->fragments->nelts; ++i)
- {
- fragment = &APR_ARRAY_IDX(pack->fragments, i, fragment_t);
- next = &APR_ARRAY_IDX(pack->fragments, i + 1, fragment_t);
- old_len = next->position - fragment->position;
-
- SVN_ERR(get_content_length(&len, fs, fragment, FALSE, itempool));
-
- if (len > old_len)
- {
- len = (apr_size_t)(len * 1.1) + 10;
- needed_to_expand = TRUE;
- }
- else
- len = old_len;
-
- if (i == pack->info->nelts - 1)
- {
- info = APR_ARRAY_IDX(pack->info, pack->info->nelts - 1, revision_info_t*);
- info->target.offset = current_pos;
- }
-
- move_fragment(fragment, current_pos);
- current_pos += len;
-
- svn_pool_clear(itempool);
- }
-
- fragment = &APR_ARRAY_IDX(pack->fragments, pack->fragments->nelts-1, fragment_t);
- fragment->position = current_pos;
-
-      /* update the revision sizes (they all end at the end of the
-       * pack file now) */
- SVN_ERR(get_content_length(&len, fs, fragment, FALSE, itempool));
- current_pos += len;
-
- for (i = 0; i < pack->info->nelts; ++i)
- {
- info = APR_ARRAY_IDX(pack->info, i, revision_info_t*);
- info->target.end = current_pos;
- }
- }
- while (needed_to_expand);
-
- svn_pool_destroy(itempool);
-
- return SVN_NO_ERROR;
-}
-
-/* Write reorg'ed target content for PACK in FS. Use POOL for allocations.
- */
-static svn_error_t *
-write_revisions(fs_fs_t *fs,
- revision_pack_t *pack,
- apr_pool_t *pool)
-{
- int i;
- fragment_t *fragment = NULL;
- svn_string_t *content;
-
- apr_pool_t *itempool = svn_pool_create(pool);
- apr_pool_t *iterpool = svn_pool_create(pool);
-
- apr_file_t *file;
- apr_size_t current_pos = 0;
- svn_stringbuf_t *null_buffer = svn_stringbuf_create_empty(iterpool);
-
- /* create the target file */
- const char *dir = apr_psprintf(iterpool, "%s/new/%ld%s",
- fs->path, pack->base / fs->max_files_per_dir,
- pack->info->nelts > 1 ? ".pack" : "");
- SVN_ERR(svn_io_make_dir_recursively(dir, pool));
- SVN_ERR(svn_io_file_open(&file,
- pack->info->nelts > 1
- ? apr_psprintf(iterpool, "%s/pack", dir)
- : apr_psprintf(iterpool, "%s/%ld", dir, pack->base),
- APR_WRITE | APR_CREATE | APR_BUFFERED,
- APR_OS_DEFAULT,
- iterpool));
-
- /* write all fragments */
- for (i = 0; i < pack->fragments->nelts; ++i)
- {
- apr_size_t padding;
-
- /* get fragment content to write */
- fragment = &APR_ARRAY_IDX(pack->fragments, i, fragment_t);
- SVN_ERR(get_fragment_content(&content, fs, fragment, itempool));
- SVN_ERR_ASSERT(fragment->position >= current_pos);
-
- /* number of bytes between this and the previous fragment */
- if ( fragment->kind == header_fragment
- && i+1 < pack->fragments->nelts)
- /* special case: header fragments are aligned to the slot end */
- padding = APR_ARRAY_IDX(pack->fragments, i+1, fragment_t).position -
- content->len - current_pos;
- else
- /* standard case: fragments are aligned to the slot start */
- padding = fragment->position - current_pos;
-
- /* write padding between fragments */
- if (padding)
- {
- while (null_buffer->len < padding)
- svn_stringbuf_appendbyte(null_buffer, 0);
-
- SVN_ERR(svn_io_file_write_full(file,
- null_buffer->data,
- padding,
- NULL,
- itempool));
- current_pos += padding;
- }
-
- /* write fragment content */
- SVN_ERR(svn_io_file_write_full(file,
- content->data,
- content->len,
- NULL,
- itempool));
- current_pos += content->len;
-
- svn_pool_clear(itempool);
- }
-
- apr_file_close(file);
-
- /* write new manifest file */
- if (pack->info->nelts > 1)
- {
- svn_stream_t *stream;
- SVN_ERR(svn_io_file_open(&file,
- apr_psprintf(iterpool, "%s/manifest", dir),
- APR_WRITE | APR_CREATE | APR_BUFFERED,
- APR_OS_DEFAULT,
- iterpool));
- stream = svn_stream_from_aprfile2(file, FALSE, iterpool);
-
- for (i = 0; i < pack->info->nelts; ++i)
- {
- revision_info_t *info = APR_ARRAY_IDX(pack->info, i,
- revision_info_t *);
- SVN_ERR(svn_stream_printf(stream, itempool,
- "%" APR_SIZE_T_FMT "\n",
- info->target.offset));
- svn_pool_clear(itempool);
- }
- }
-
- /* cleanup */
- svn_pool_destroy(itempool);
- svn_pool_destroy(iterpool);
-
- return SVN_NO_ERROR;
-}
-
-/* Write reorg'ed target content for all revisions in FS. To maximize
- * data locality, pack and write in one go per pack file.
- * Use POOL for allocations.
- */
-static svn_error_t *
-pack_and_write_revisions(fs_fs_t *fs,
- apr_pool_t *pool)
-{
- int i;
-
- SVN_ERR(svn_io_make_dir_recursively(apr_psprintf(pool, "%s/new",
- fs->path),
- pool));
-
- for (i = 0; i < fs->packs->nelts; ++i)
- {
- revision_pack_t *pack = APR_ARRAY_IDX(fs->packs, i, revision_pack_t*);
- if (pack->base % fs->max_files_per_dir == 0)
- print_progress(pack->base);
-
- SVN_ERR(pack_revisions(fs, pack, pool));
- SVN_ERR(write_revisions(fs, pack, pool));
- }
-
- return SVN_NO_ERROR;
-}
-
-/* For the directory REPRESENTATION in FS, construct the new (target)
- * serialized plaintext representation and return it in *CONTENT.
- * Allocate the result in POOL and temporaries in SCRATCH_POOL.
- */
-static svn_error_t *
-get_updated_dir(svn_string_t **content,
- fs_fs_t *fs,
- representation_t *representation,
- apr_pool_t *pool,
- apr_pool_t *scratch_pool)
-{
- apr_hash_t *hash;
- apr_pool_t *hash_pool = svn_pool_create(scratch_pool);
- apr_array_header_t *dir = representation->dir->entries;
- int i;
- svn_stream_t *stream;
- svn_stringbuf_t *result;
-
- /* get the original content */
- SVN_ERR(read_dir(&hash, fs, representation, scratch_pool));
- hash = apr_hash_copy(hash_pool, hash);
-
- /* update all entries */
- for (i = 0; i < dir->nelts; ++i)
- {
- char buffer[256];
- svn_string_t *new_val;
- apr_size_t pos;
-
-      /* find the original entry for the current name */
- direntry_t *entry = APR_ARRAY_IDX(dir, i, direntry_t *);
- svn_string_t *str_val = apr_hash_get(hash, entry->name, entry->name_len);
- if (str_val == NULL)
- return svn_error_createf(SVN_ERR_FS_CORRUPT, NULL,
- _("Dir entry '%s' not found"), entry->name);
-
- SVN_ERR_ASSERT(str_val->len < sizeof(buffer));
-
-      /* create an updated node ID */
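-      /* (IDs have the form "<node>.<copy>.r<rev>/<offset>"; only the
-       * offset after the '/' needs a new value) */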
- memcpy(buffer, str_val->data, str_val->len+1);
- pos = strchr(buffer, '/') - buffer + 1;
- pos += svn__ui64toa(buffer + pos, entry->node->target.offset - entry->node->revision->target.offset);
- new_val = svn_string_ncreate(buffer, pos, hash_pool);
-
- /* store it in the hash */
- apr_hash_set(hash, entry->name, entry->name_len, new_val);
- }
-
- /* serialize the updated hash */
- result = svn_stringbuf_create_ensure(representation->target.size, pool);
- stream = svn_stream_from_stringbuf(result, hash_pool);
- SVN_ERR(svn_hash_write2(hash, stream, SVN_HASH_TERMINATOR, hash_pool));
- svn_pool_destroy(hash_pool);
-
- /* done */
- *content = svn_stringbuf__morph_into_string(result);
-
- return SVN_NO_ERROR;
-}
-
-/* Calculate the delta representation for the given CONTENT and BASE.
- * Append the resulting svndiff data to DIFF. Use POOL for allocations.
- */
-static svn_error_t *
-diff_stringbufs(svn_stringbuf_t *diff,
- svn_string_t *base,
- svn_string_t *content,
- apr_pool_t *pool)
-{
- svn_txdelta_window_handler_t diff_wh;
- void *diff_whb;
-
- svn_stream_t *stream;
- svn_stream_t *source = svn_stream_from_string(base, pool);
- svn_stream_t *target = svn_stream_from_stringbuf(diff, pool);
-
- /* Prepare to write the svndiff data. */
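-  /* (the constant 1 below selects svndiff format version 1) */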
- svn_txdelta_to_svndiff3(&diff_wh,
- &diff_whb,
- target,
- 1,
- SVN_DELTA_COMPRESSION_LEVEL_DEFAULT,
- pool);
-
- /* create delta stream */
- stream = svn_txdelta_target_push(diff_wh, diff_whb, source, pool);
-
- /* run delta */
- SVN_ERR(svn_stream_write(stream, content->data, &content->len));
- SVN_ERR(svn_stream_close(stream));
-
- return SVN_NO_ERROR;
-}
-
-/* Update the noderev id value for KEY in the textual noderev representation
- * in NODE_REV. Take the new id from NODE. This is a no-op if the KEY
- * cannot be found.
- */
-static void
-update_id(svn_stringbuf_t *node_rev,
- const char *key,
- noderev_t *node)
-{
- char *newline_pos = 0;
- char *pos;
-
- /* we need to update the offset only -> find its position */
- pos = strstr(node_rev->data, key);
- if (pos)
- pos = strchr(pos, '/');
- if (pos)
- newline_pos = strchr(++pos, '\n');
-
- if (pos && newline_pos)
- {
- /* offset data has been found -> replace it */
- char temp[SVN_INT64_BUFFER_SIZE];
- apr_size_t len = svn__i64toa(temp, node->target.offset - node->revision->target.offset);
- svn_stringbuf_replace(node_rev,
- pos - node_rev->data, newline_pos - pos,
- temp, len);
- }
-}
-
-/* Update the representation id value for KEY in the textual noderev
- * representation in NODE_REV. Take the offset, sizes and new MD5 from
- * REPRESENTATION. Use SCRATCH_POOL for allocations.
- * This is a no-op if the KEY cannot be found.
- */
-static void
-update_text(svn_stringbuf_t *node_rev,
- const char *key,
- representation_t *representation,
- apr_pool_t *scratch_pool)
-{
- apr_size_t key_len = strlen(key);
- char *pos = strstr(node_rev->data, key);
- char *val_pos;
-
- if (!pos)
- return;
-
- val_pos = pos + key_len;
- if (representation->dir)
- {
- /* for directories, we need to write all rep info anew */
- char *newline_pos = strchr(val_pos, '\n');
-      svn_checksum_t checksum;
-      const char* temp;
-
-      /* set up the checksum *before* formatting it into TEMP below */
-      checksum.digest = representation->dir->target_md5;
-      checksum.kind = svn_checksum_md5;
-
-      temp = apr_psprintf(scratch_pool, "%ld %" APR_SIZE_T_FMT " %"
-                          APR_SIZE_T_FMT " %" APR_SIZE_T_FMT " %s",
-                          representation->revision->revision,
-                          representation->target.offset - representation->revision->target.offset,
-                          representation->target.size,
-                          representation->dir->size,
-                          svn_checksum_to_cstring(&checksum,
-                                                  scratch_pool));
-
- svn_stringbuf_replace(node_rev,
- val_pos - node_rev->data, newline_pos - val_pos,
- temp, strlen(temp));
- }
- else
- {
- /* ordinary representation: replace offset and rep size only.
- * Content size and checksums are unchanged. */
- const char* temp;
- char *end_pos = strchr(val_pos, ' ');
-
- val_pos = end_pos + 1;
- end_pos = strchr(strchr(val_pos, ' ') + 1, ' ');
- temp = apr_psprintf(scratch_pool, "%" APR_SIZE_T_FMT " %" APR_SIZE_T_FMT,
- representation->target.offset - representation->revision->target.offset,
- representation->target.size);
-
- svn_stringbuf_replace(node_rev,
- val_pos - node_rev->data, end_pos - val_pos,
- temp, strlen(temp));
- }
-}
-
-/* Get the target content (i.e. the data block as it will be written to
- * the file) for the given FRAGMENT in FS. Return the content in *CONTENT.
- * Use POOL for allocations.
- *
- * Note that, as a side-effect, this will update the target rep. info for
- * directories.
- */
-static svn_error_t *
-get_fragment_content(svn_string_t **content,
- fs_fs_t *fs,
- fragment_t *fragment,
- apr_pool_t *pool)
-{
- revision_info_t *info;
- representation_t *representation;
- noderev_t *node;
- svn_string_t *revision_content, *base_content;
- svn_stringbuf_t *header, *node_rev, *text;
- apr_size_t header_size;
- svn_checksum_t *checksum = NULL;
-
- switch (fragment->kind)
- {
- /* revision headers can be constructed from target position info */
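-      /* (this is the FSFS revision trailer "\n<root offset> <changes
-       * offset>\n" which sits at the very end of each revision) */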
- case header_fragment:
- info = fragment->data;
- *content = svn_string_createf(pool,
- "\n%" APR_SIZE_T_FMT " %" APR_SIZE_T_FMT "\n",
- info->root_noderev->target.offset - info->target.offset,
- info->target.changes);
- return SVN_NO_ERROR;
-
- /* The changes list remains untouched */
- case changes_fragment:
- info = fragment->data;
- SVN_ERR(get_content(&revision_content, fs, info->revision, pool));
-
- *content = svn_string_create_empty(pool);
- (*content)->data = revision_content->data + info->original.changes;
- (*content)->len = info->target.changes_len;
- return SVN_NO_ERROR;
-
-      /* property and file reps get new headers and need to be rewritten,
- * iff the base rep is a directory. The actual (deltified) content
- * remains unchanged, though. MD5 etc. do not change. */
- case property_fragment:
- case file_fragment:
- representation = fragment->data;
- SVN_ERR(get_content(&revision_content, fs,
- representation->revision->revision, pool));
-
- if (representation->delta_base)
- if (representation->delta_base->dir)
- {
- /* if the base happens to be a directory, reconstruct the
- * full text and represent it as PLAIN rep. */
- SVN_ERR(get_combined_window(&text, fs, representation, pool));
- representation->target.size = text->len;
-
- svn_stringbuf_insert(text, 0, "PLAIN\n", 6);
- svn_stringbuf_appendcstr(text, "ENDREP\n");
- *content = svn_stringbuf__morph_into_string(text);
-
- return SVN_NO_ERROR;
- }
- else
- /* construct a new rep header */
- if (representation->delta_base == fs->null_base)
- header = svn_stringbuf_create("DELTA\n", pool);
- else
- header = svn_stringbuf_createf(pool,
- "DELTA %ld %" APR_SIZE_T_FMT " %" APR_SIZE_T_FMT "\n",
- representation->delta_base->revision->revision,
- representation->delta_base->target.offset
- - representation->delta_base->revision->target.offset,
- representation->delta_base->target.size);
- else
- header = svn_stringbuf_create("PLAIN\n", pool);
-
- /* if it exists, the actual delta base is unchanged. Hence, this
- * rep is unchanged even if it has been deltified. */
- header_size = strchr(revision_content->data +
- representation->original.offset, '\n') -
- revision_content->data -
- representation->original.offset + 1;
- svn_stringbuf_appendbytes(header,
- revision_content->data +
- representation->original.offset +
- header_size,
- representation->original.size);
- svn_stringbuf_appendcstr(header, "ENDREP\n");
- *content = svn_stringbuf__morph_into_string(header);
- return SVN_NO_ERROR;
-
- /* directory reps need to be rewritten (and deltified) completely.
- * As a side-effect, update the MD5 and target content size. */
- case dir_fragment:
- /* construct new content and update MD5 */
- representation = fragment->data;
- SVN_ERR(get_updated_dir(&revision_content, fs, representation,
- pool, pool));
- SVN_ERR(svn_checksum(&checksum, svn_checksum_md5,
- revision_content->data, revision_content->len,
- pool));
- memcpy(representation->dir->target_md5,
- checksum->digest,
- sizeof(representation->dir->target_md5));
-
- /* deltify against the base rep if necessary */
- if (representation->delta_base)
- {
- if (representation->delta_base->dir == NULL)
- {
- /* dummy or non-dir base rep -> self-compress only */
- header = svn_stringbuf_create("DELTA\n", pool);
- base_content = svn_string_create_empty(pool);
- }
- else
- {
- /* deltify against base rep (which is a directory, too)*/
- representation_t *base_rep = representation->delta_base;
- header = svn_stringbuf_createf(pool,
- "DELTA %ld %" APR_SIZE_T_FMT " %" APR_SIZE_T_FMT "\n",
- base_rep->revision->revision,
- base_rep->target.offset - base_rep->revision->target.offset,
- base_rep->target.size);
- SVN_ERR(get_updated_dir(&base_content, fs, base_rep,
- pool, pool));
- }
-
- /* run deltification and update target content size */
- header_size = header->len;
- SVN_ERR(diff_stringbufs(header, base_content,
- revision_content, pool));
- representation->dir->size = revision_content->len;
- representation->target.size = header->len - header_size;
- svn_stringbuf_appendcstr(header, "ENDREP\n");
- *content = svn_stringbuf__morph_into_string(header);
- }
- else
- {
- /* no delta base (not even a dummy) -> PLAIN rep */
- representation->target.size = revision_content->len;
- representation->dir->size = revision_content->len;
- *content = svn_string_createf(pool, "PLAIN\n%sENDREP\n",
- revision_content->data);
- }
-
- return SVN_NO_ERROR;
-
- /* construct the new noderev content. No side-effects.*/
- case noderev_fragment:
- /* get the original noderev as string */
- node = fragment->data;
- SVN_ERR(get_content(&revision_content, fs,
- node->revision->revision, pool));
- node_rev = svn_stringbuf_ncreate(revision_content->data +
- node->original.offset,
- node->original.size,
- pool);
-
-      /* update the values that may have changed for the target */
- update_id(node_rev, "id: ", node);
- update_id(node_rev, "pred: ", node->predecessor);
- update_text(node_rev, "text: ", node->text, pool);
- update_text(node_rev, "props: ", node->props, pool);
-
- *content = svn_stringbuf__morph_into_string(node_rev);
- return SVN_NO_ERROR;
- }
-
- SVN_ERR_ASSERT(0);
-
- return SVN_NO_ERROR;
-}
-
-/* In the repository at PATH, restore the original content in case we ran
- * this reorg tool before. Use POOL for allocations.
- */
-static svn_error_t *
-prepare_repo(const char *path, apr_pool_t *pool)
-{
- svn_node_kind_t kind;
-
- const char *old_path = svn_dirent_join(path, "db/old", pool);
- const char *new_path = svn_dirent_join(path, "new", pool);
- const char *revs_path = svn_dirent_join(path, "db/revs", pool);
- const char *old_rep_cache_path = svn_dirent_join(path, "db/rep-cache.db.old", pool);
- const char *rep_cache_path = svn_dirent_join(path, "db/rep-cache.db", pool);
-
- /* is there a backup? */
- SVN_ERR(svn_io_check_path(old_path, &kind, pool));
- if (kind == svn_node_dir)
- {
-      /* yes, restore the original content from it */
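-      /* (the moves below park the reorg'ed revs in "new", restore the
-       * backup into "revs" and finally drop the parked data) */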
- SVN_ERR(svn_io_remove_dir2(new_path, TRUE, NULL, NULL, pool));
- SVN_ERR(svn_io_file_move(revs_path, new_path, pool));
- SVN_ERR(svn_io_file_move(old_path, revs_path, pool));
- SVN_ERR(svn_io_remove_dir2(new_path, TRUE, NULL, NULL, pool));
- }
-
- /* same for the rep cache db */
- SVN_ERR(svn_io_check_path(old_rep_cache_path, &kind, pool));
- if (kind == svn_node_file)
- SVN_ERR(svn_io_file_move(old_rep_cache_path, rep_cache_path, pool));
-
- return SVN_NO_ERROR;
-}
-
-/* In the repository at PATH, create a backup of the original content and
- * replace it with the reorg'ed. Use POOL for allocations.
- */
-static svn_error_t *
-activate_new_revs(const char *path, apr_pool_t *pool)
-{
- svn_node_kind_t kind;
-
- const char *old_path = svn_dirent_join(path, "db/old", pool);
- const char *new_path = svn_dirent_join(path, "new", pool);
- const char *revs_path = svn_dirent_join(path, "db/revs", pool);
- const char *old_rep_cache_path = svn_dirent_join(path, "db/rep-cache.db.old", pool);
- const char *rep_cache_path = svn_dirent_join(path, "db/rep-cache.db", pool);
-
-  /* if there is no backup yet, move the current repo content to the backup
-   * and replace it with the new (reorg'ed) data. */
- SVN_ERR(svn_io_check_path(old_path, &kind, pool));
- if (kind == svn_node_none)
- {
- SVN_ERR(svn_io_file_move(revs_path, old_path, pool));
- SVN_ERR(svn_io_file_move(new_path, revs_path, pool));
- }
-
- /* same for the rep cache db */
- SVN_ERR(svn_io_check_path(old_rep_cache_path, &kind, pool));
- if (kind == svn_node_none)
- SVN_ERR(svn_io_file_move(rep_cache_path, old_rep_cache_path, pool));
-
- return SVN_NO_ERROR;
-}
-
-/* Write tool usage info text to OSTREAM using PROGNAME as a prefix and
- * POOL for allocations.
- */
-static void
-print_usage(svn_stream_t *ostream, const char *progname,
- apr_pool_t *pool)
-{
- svn_error_clear(svn_stream_printf(ostream, pool,
- "\n"
- "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! WARNING !!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
- "!!! This is an experimental tool. Don't use it on production data !!!\n"
- "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
- "\n"
- "Usage: %s <repo> <cachesize>\n"
- "\n"
- "Optimize the repository at local path <repo> staring from revision 0.\n"
- "Use up to <cachesize> MB of memory for caching. This does not include\n"
- "temporary representation of the repository structure, i.e. the actual\n"
- "memory will be higher and <cachesize> be the lower limit.\n",
- progname));
-}
-
-/* linear control flow */
-int main(int argc, const char *argv[])
-{
- apr_pool_t *pool;
- svn_stream_t *ostream;
- svn_error_t *svn_err;
- const char *repo_path = NULL;
- svn_revnum_t start_revision = 0;
- apr_size_t memsize = 0;
- apr_uint64_t temp = 0;
- fs_fs_t *fs;
-
- apr_initialize();
- atexit(apr_terminate);
-
- pool = apr_allocator_owner_get(svn_pool_create_allocator(FALSE));
-
- svn_err = svn_stream_for_stdout(&ostream, pool);
- if (svn_err)
- {
- svn_handle_error2(svn_err, stdout, FALSE, ERROR_TAG);
- return 2;
- }
-
- if (argc != 3)
- {
- print_usage(ostream, argv[0], pool);
- return 2;
- }
-
- svn_err = svn_cstring_strtoui64(&temp, argv[2], 0, APR_SIZE_MAX, 10);
- if (svn_err)
- {
- print_usage(ostream, argv[0], pool);
- svn_error_clear(svn_err);
- return 2;
- }
-
- memsize = (apr_size_t)temp;
- repo_path = argv[1];
- start_revision = 0;
-
- printf("\nPreparing repository\n");
- svn_err = prepare_repo(repo_path, pool);
-
- if (!svn_err)
- {
- printf("Reading revisions\n");
- svn_err = read_revisions(&fs, repo_path, start_revision, memsize, pool);
- }
-
- if (!svn_err)
- {
- printf("\nReordering revision content\n");
- svn_err = reorder_revisions(fs, pool);
- }
-
- if (!svn_err)
- {
- printf("\nPacking and writing revisions\n");
- svn_err = pack_and_write_revisions(fs, pool);
- }
-
- if (!svn_err)
- {
- printf("\nSwitch to new revs\n");
- svn_err = activate_new_revs(repo_path, pool);
- }
-
- if (svn_err)
- {
- svn_handle_error2(svn_err, stdout, FALSE, ERROR_TAG);
- return 2;
- }
-
- return 0;
-}
diff --git a/tools/dev/gdb-py/svndbg/printers.py b/tools/dev/gdb-py/svndbg/printers.py
index da041b4..f1ee085 100644
--- a/tools/dev/gdb-py/svndbg/printers.py
+++ b/tools/dev/gdb-py/svndbg/printers.py
@@ -145,8 +145,8 @@ cstringType = gdb.lookup_type('char').pointer()
apr_hash_count = InferiorFunction('apr_hash_count')
apr_hash_first = InferiorFunction('apr_hash_first')
apr_hash_next = InferiorFunction('apr_hash_next')
-svn__apr_hash_index_key = InferiorFunction('svn__apr_hash_index_key')
-svn__apr_hash_index_val = InferiorFunction('svn__apr_hash_index_val')
+apr_hash_this_key = InferiorFunction('apr_hash_this_key')
+apr_hash_this_val = InferiorFunction('apr_hash_this_val')
def children_of_apr_hash(hash_p, value_type=None):
"""Iterate over an 'apr_hash_t *' GDB value, in the way required for a
@@ -156,9 +156,9 @@ def children_of_apr_hash(hash_p, value_type=None):
"""
hi = apr_hash_first(0, hash_p)
while (hi):
- k = svn__apr_hash_index_key(hi).reinterpret_cast(cstringType)
+ k = apr_hash_this_key(hi).reinterpret_cast(cstringType)
if value_type:
- val = svn__apr_hash_index_val(hi).reinterpret_cast(value_type)
+ val = apr_hash_this_val(hi).reinterpret_cast(value_type)
else:
val = '...'
try:
diff --git a/tools/dev/po-merge.py b/tools/dev/po-merge.py
index 15f0897..e63a739 100755
--- a/tools/dev/po-merge.py
+++ b/tools/dev/po-merge.py
@@ -146,6 +146,7 @@ def main(argv):
string_count = 0
update_count = 0
untranslated = 0
+ fuzzy = 0
while True:
comments, msgid, msgid_plural, msgstr = parse_translation(infile)
if not comments and msgid is None:
@@ -177,14 +178,19 @@ def main(argv):
for i in msgstr:
outfile.write('msgstr[%s] %s\n' % (n, msgstr[n]))
n += 1
- for m in msgstr:
- if m == '""':
- untranslated += 1
+ if msgstr is not None:
+ for m in msgstr:
+ if m == '""':
+ untranslated += 1
+ for c in comments:
+ if c.startswith('#,') and 'fuzzy' in c.split(', '):
+ fuzzy += 1
# We're done. Tell the user what we did.
print(('%d strings updated. '
+ '%d fuzzy strings. '
'%d of %d strings are still untranslated (%.0f%%).' %
- (update_count, untranslated, string_count,
+ (update_count, fuzzy, untranslated, string_count,
100.0 * untranslated / string_count)))
if __name__ == '__main__':
diff --git a/tools/dev/remove-trailing-whitespace.sh b/tools/dev/remove-trailing-whitespace.sh
index 440dfaa..1dbde0c 100755
--- a/tools/dev/remove-trailing-whitespace.sh
+++ b/tools/dev/remove-trailing-whitespace.sh
@@ -17,8 +17,8 @@
# specific language governing permissions and limitations
# under the License.
- for ext in c h cpp java py pl rb hpp cmd bat; do
- find . -name "*.$ext" -exec \
+ for ext in c h cpp java py pl rb hpp cmd bat sql sh; do
+ find . -name "*.$ext" -not -type l -exec \
perl -pi -e 's/[ \t]*$//' {} + ;
- # don't use \t to not strip ^L pagebreaks
- done
+ # don't use \s to not strip ^L pagebreaks
+ done
diff --git a/tools/dev/svnraisetreeconflict/svnraisetreeconflict.c b/tools/dev/svnraisetreeconflict/svnraisetreeconflict.c
index aa39816..65825d5 100644
--- a/tools/dev/svnraisetreeconflict/svnraisetreeconflict.c
+++ b/tools/dev/svnraisetreeconflict/svnraisetreeconflict.c
@@ -49,22 +49,6 @@
#define OPT_VERSION SVN_OPT_FIRST_LONGOPT_ID
-/** A statement macro, similar to @c SVN_INT_ERR, but issues a
- * message saying "svnraisetreeconflict:" instead of "svn:".
- *
- * Evaluate @a expr. If it yields an error, handle that error and
- * return @c EXIT_FAILURE.
- */
-#define SVNRAISETC_INT_ERR(expr) \
- do { \
- svn_error_t *svn_err__temp = (expr); \
- if (svn_err__temp) { \
- svn_handle_error2(svn_err__temp, stderr, FALSE, \
- "svnraisetreeconflict: "); \
- svn_error_clear(svn_err__temp); \
- return EXIT_FAILURE; } \
- } while (0)
-
static svn_error_t *
version(apr_pool_t *pool)
{
@@ -78,7 +62,6 @@ usage(apr_pool_t *pool)
svn_error_clear(svn_cmdline_fprintf
(stderr, pool,
_("Type 'svnraisetreeconflict --help' for usage.\n")));
- exit(1);
}
/***************************************************************************
@@ -224,7 +207,7 @@ raise_tree_conflict(int argc, const char **argv, apr_pool_t *pool)
right = svn_wc_conflict_version_create2(repos_url2, NULL, path_in_repos2,
peg_rev2, kind2, pool);
c = svn_wc_conflict_description_create_tree2(wc_abspath, kind,
- operation, left, right, pool);
+ operation, left, right, pool);
c->action = (svn_wc_conflict_action_t)action;
c->reason = (svn_wc_conflict_reason_t)reason;
@@ -295,7 +278,6 @@ help(const apr_getopt_option_t *options, apr_pool_t *pool)
get_enum_str(node_kind_map, svn_node_file),
get_enum_str(node_kind_map, svn_node_none)
));
- exit(0);
}
@@ -311,14 +293,17 @@ check_lib_versions(void)
};
SVN_VERSION_DEFINE(my_version);
- return svn_ver_check_list(&my_version, checklist);
+ return svn_ver_check_list2(&my_version, checklist, svn_ver_equal);
}
-int
-main(int argc, const char *argv[])
+/*
+ * On success, leave *EXIT_CODE untouched and return SVN_NO_ERROR. On error,
+ * either return an error to be displayed, or set *EXIT_CODE to non-zero and
+ * return SVN_NO_ERROR.
+ */
+static svn_error_t *
+sub_main(int *exit_code, int argc, const char *argv[], apr_pool_t *pool)
{
- apr_pool_t *pool;
- svn_error_t *err;
apr_getopt_t *os;
const apr_getopt_option_t options[] =
{
@@ -329,33 +314,18 @@ main(int argc, const char *argv[])
};
apr_array_header_t *remaining_argv;
- /* Initialize the app. */
- if (svn_cmdline_init("svnraisetreeconflict", stderr) != EXIT_SUCCESS)
- return EXIT_FAILURE;
-
- /* Create our top-level pool. Use a separate mutexless allocator,
- * given this application is single threaded.
- */
- pool = apr_allocator_owner_get(svn_pool_create_allocator(FALSE));
-
/* Check library versions */
- err = check_lib_versions();
- if (err)
- return svn_cmdline_handle_exit_error(err, pool, "svnraisetreeconflict: ");
+ SVN_ERR(check_lib_versions());
#if defined(WIN32) || defined(__CYGWIN__)
/* Set the working copy administrative directory name. */
if (getenv("SVN_ASP_DOT_NET_HACK"))
{
- err = svn_wc_set_adm_dir("_svn", pool);
- if (err)
- return svn_cmdline_handle_exit_error(err, pool, "svnraisetreeconflict: ");
+ SVN_ERR(svn_wc_set_adm_dir("_svn", pool));
}
#endif
- err = svn_cmdline__getopt_init(&os, argc, argv, pool);
- if (err)
- return svn_cmdline_handle_exit_error(err, pool, "svnraisetreeconflict: ");
+ SVN_ERR(svn_cmdline__getopt_init(&os, argc, argv, pool));
os->interleave = 1;
while (1)
@@ -366,19 +336,24 @@ main(int argc, const char *argv[])
if (APR_STATUS_IS_EOF(status))
break;
if (status != APR_SUCCESS)
- usage(pool); /* this will exit() */
+ {
+ usage(pool);
+ *exit_code = EXIT_FAILURE;
+ return SVN_NO_ERROR;
+ }
switch (opt)
{
case 'h':
help(options, pool);
- break;
+ return SVN_NO_ERROR;
case OPT_VERSION:
- SVNRAISETC_INT_ERR(version(pool));
- exit(0);
- break;
+ SVN_ERR(version(pool));
+ return SVN_NO_ERROR;
default:
- usage(pool); /* this will exit() */
+ usage(pool);
+ *exit_code = EXIT_FAILURE;
+ return SVN_NO_ERROR;
}
}
@@ -388,23 +363,53 @@ main(int argc, const char *argv[])
{
const char *s;
- SVNRAISETC_INT_ERR(svn_utf_cstring_to_utf8(&s, os->argv[os->ind++],
- pool));
+ SVN_ERR(svn_utf_cstring_to_utf8(&s, os->argv[os->ind++], pool));
APR_ARRAY_PUSH(remaining_argv, const char *) = s;
}
if (remaining_argv->nelts < 1)
- usage(pool); /* this will exit() */
+ {
+ usage(pool);
+ *exit_code = EXIT_FAILURE;
+ return SVN_NO_ERROR;
+ }
/* Do the main task */
- SVNRAISETC_INT_ERR(raise_tree_conflict(remaining_argv->nelts,
- (const char **)remaining_argv->elts,
- pool));
+ SVN_ERR(raise_tree_conflict(remaining_argv->nelts,
+ (const char **)remaining_argv->elts,
+ pool));
- svn_pool_destroy(pool);
+ return SVN_NO_ERROR;
+}
+
+int
+main(int argc, const char *argv[])
+{
+ apr_pool_t *pool;
+ int exit_code = EXIT_SUCCESS;
+ svn_error_t *err;
+
+ /* Initialize the app. */
+ if (svn_cmdline_init("svnraisetreeconflict", stderr) != EXIT_SUCCESS)
+ return EXIT_FAILURE;
+
+ /* Create our top-level pool. Use a separate mutexless allocator,
+ * given this application is single threaded.
+ */
+ pool = apr_allocator_owner_get(svn_pool_create_allocator(FALSE));
- /* Flush stdout to make sure that the user will see any printing errors. */
- SVNRAISETC_INT_ERR(svn_cmdline_fflush(stdout));
+ err = sub_main(&exit_code, argc, argv, pool);
- return EXIT_SUCCESS;
+ /* Flush stdout and report if it fails. It would be flushed on exit anyway
+ but this makes sure that output is not silently lost if it fails. */
+ err = svn_error_compose_create(err, svn_cmdline_fflush(stdout));
+
+ if (err)
+ {
+ exit_code = EXIT_FAILURE;
+ svn_cmdline_handle_exit_error(err, NULL, "svnraisetreeconflict: ");
+ }
+
+ svn_pool_destroy(pool);
+ return exit_code;
}
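
The local SVNRAISETC_INT_ERR macro can be deleted because option processing now
lives in sub_main(), which itself returns svn_error_t *: the stock SVN_ERR macro
then propagates failures up to main(), where they are reported exactly once. For
reference, SVN_ERR is roughly (per svn_error.h):

    #define SVN_ERR(expr)                          \
      do {                                         \
        svn_error_t *svn_err__temp = (expr);       \
        if (svn_err__temp)                         \
          return svn_err__temp;                    \
      } while (0)
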
diff --git a/tools/dev/trails.py b/tools/dev/trails.py
index 9717c6c..917d234 100755
--- a/tools/dev/trails.py
+++ b/tools/dev/trails.py
@@ -35,7 +35,7 @@ import operator
_re_trail = re.compile('\((?P<txn_body>[a-z_]*), (?P<filename>[a-z_\-./]*), (?P<lineno>[0-9]*), (?P<txn>0|1)\): (?P<ops>.*)')
_re_table_op = re.compile('\(([a-z]*), ([a-z]*)\)')
-_seperator = '------------------------------------------------------------\n'
+_separator = '------------------------------------------------------------\n'
def parse_trails_log(infile):
trails = []
@@ -79,9 +79,9 @@ def output_summary(trails, outfile):
median_ops = ops[total_trails / 2]
average_ops = float(total_ops) / total_trails
- outfile.write(_seperator)
+ outfile.write(_separator)
outfile.write('Summary\n')
- outfile.write(_seperator)
+ outfile.write(_separator)
outfile.write('Total number of trails: %10i\n' % total_trails)
outfile.write('Total number of ops: %10i\n' % total_ops)
outfile.write('max ops/trail: %10i\n' % max_ops)
@@ -123,9 +123,9 @@ def output_trail_length_frequencies(trails, outfile):
total_trails = len(ops)
frequencies = list_frequencies(ops)
- outfile.write(_seperator)
+ outfile.write(_separator)
outfile.write('Trail length frequencies\n')
- outfile.write(_seperator)
+ outfile.write(_separator)
outfile.write('ops/trail frequency percentage\n')
for (r, f) in frequencies:
p = float(f) * 100 / total_trails
@@ -164,9 +164,9 @@ def output_trail_frequencies(trails, outfile):
frequencies = list_frequencies(ttrails)
- outfile.write(_seperator)
+ outfile.write(_separator)
outfile.write('Trail frequencies\n')
- outfile.write(_seperator)
+ outfile.write(_separator)
outfile.write('frequency percentage ops/trail trail\n')
for (((txn_body, file, line), trail), f) in frequencies:
p = float(f) * 100 / total_trails
@@ -183,9 +183,9 @@ def output_txn_body_frequencies(trails, outfile):
total_trails = len(trails)
frequencies = list_frequencies(bodies)
- outfile.write(_seperator)
+ outfile.write(_separator)
outfile.write('txn_body frequencies\n')
- outfile.write(_seperator)
+ outfile.write(_separator)
outfile.write('frequency percentage txn_body\n')
for ((txn_body, file, line), f) in frequencies:
p = float(f) * 100 / total_trails
diff --git a/tools/dev/unix-build/Makefile.svn b/tools/dev/unix-build/Makefile.svn
index 0bdddd5..d6032e3 100644
--- a/tools/dev/unix-build/Makefile.svn
+++ b/tools/dev/unix-build/Makefile.svn
@@ -29,16 +29,19 @@
# | the bot's health after making changes to this file. |
# |______________________________________________________________|
-ENABLE_PERL_BINDINGS ?= yes
+PERL ?= yes
+ENABLE_PERL_BINDINGS = $(PERL)
THREADING ?= yes
ifeq ($(THREADING),yes)
-ENABLE_JAVA_BINDINGS ?= yes
+JAVA ?= yes
else
-ENABLE_JAVA_BINDINGS ?= no
+JAVA ?= no
endif
+ENABLE_JAVA_BINDINGS = $(JAVA)
USE_APR_ICONV ?= no # set to yes to use APR iconv instead of GNU iconv
PARALLEL ?= 1
CLEANUP ?= 1
+EXCLUSIVE_WC_LOCKS ?= 1
USE_HTTPV1 ?= no
USE_AUTHZ_SHORT_CIRCUIT ?= no
RAMDISK ?= /ramdisk
@@ -65,21 +68,22 @@ OBJDIR = $(PWD)/objdir
BDB_MAJOR_VER = 4.7
BDB_VER = $(BDB_MAJOR_VER).25
-APR_VER = 1.4.6
+APR_VER = 1.5.1
APR_ICONV_VER = 1.2.1
GNU_ICONV_VER = 1.14
-APR_UTIL_VER = 1.4.1
-HTTPD_VER = 2.2.22
-NEON_VER = 0.29.6
-SERF_VER = 1.2.0
+APR_UTIL_VER = 1.5.3
+HTTPD_VER = 2.2.29
+NEON_VER = 0.30.0
+SERF_VER = 1.3.8
SERF_OLD_VER = 0.3.1
CYRUS_SASL_VER = 2.1.25
-SQLITE_VER = 3071600
-LIBMAGIC_VER = 5.11
+SQLITE_VER = 3080500
+LIBMAGIC_VER = 5.19
RUBY_VER = 1.8.7-p358
BZ2_VER = 1.0.6
-PYTHON_VER = 2.7.3
+PYTHON_VER = 2.7.8
JUNIT_VER = 4.10
+GETTEXT_VER = 0.18.3.1
BDB_DIST = db-$(BDB_VER).tar.gz
APR_ICONV_DIST = apr-iconv-$(APR_ICONV_VER).tar.gz
@@ -87,12 +91,40 @@ GNU_ICONV_DIST = libiconv-$(GNU_ICONV_VER).tar.gz
NEON_DIST = neon-$(NEON_VER).tar.gz
SQLITE_DIST = sqlite-autoconf-$(SQLITE_VER).tar.gz
CYRUS_SASL_DIST = cyrus-sasl-$(CYRUS_SASL_VER).tar.gz
-HTTPD_DIST = httpd-$(HTTPD_VER).tar.bz2
+HTTPD_DIST = httpd-$(HTTPD_VER).tar.gz
LIBMAGIC_DIST = file-$(LIBMAGIC_VER).tar.gz
RUBY_DIST = ruby-$(RUBY_VER).tar.gz
BZ2_DIST = bzip2-$(BZ2_VER).tar.gz
PYTHON_DIST = Python-$(PYTHON_VER).tgz
JUNIT_DIST = junit-${JUNIT_VER}.jar
+GETTEXT_DIST = gettext-$(GETTEXT_VER).tar.gz
+
+SHA256_${BDB_DIST} = f14fd96dd38915a1d63dcb94a63fbb8092334ceba6b5060760427096f631263e
+SHA256_${APR_ICONV_DIST} = 19381959d50c4a5f3b9c84d594a5f9ffb3809786919b3058281f4c87e1f4b245
+SHA256_${GNU_ICONV_DIST} = 72b24ded17d687193c3366d0ebe7cde1e6b18f0df8c55438ac95be39e8a30613
+SHA256_${HTTPD_DIST} = cec2878884b758b0d159a1385b2667a2ae0ca21b0bc7bcc8a9a41b5cfa5452ff
+SHA256_${NEON_DIST} = 2962cfcb5d30f3272e3d2fa0e473434419770a3801afe3d46e5d1650787990c2
+SHA256_${CYRUS_SASL_DIST} = 418c16e6240a4f9b637cbe3d62937b9675627bad27c622191d47de8686fe24fe
+SHA256_${SQLITE_DIST} = 98c33abe4106e508e73fda648b2657ac9e969fe24695f543dcde68cc71f3091b
+SHA256_${LIBMAGIC_DIST} = 9484b3bbda1acc7b13a4f71031a85ce10c77bd0ffec7226741a219ef587e3a7c
+SHA256_${RUBY_DIST} = 9e0856d58830e08f1e38233947d859898ae09d4780cb1a502108e41308de33cb
+SHA256_${BZ2_DIST} = a2848f34fcd5d6cf47def00461fcb528a0484d8edef8208d6d2e2909dc61d9cd
+SHA256_${PYTHON_DIST} = 74d70b914da4487aa1d97222b29e9554d042f825f26cb2b93abd20fdda56b557
+SHA256_${JUNIT_DIST} = 36a747ca1e0b86f6ea88055b8723bb87030d627766da6288bf077afdeeb0f75a
+SHA256_${GETTEXT_DIST} = 0d8f9a33531b77776b3dc473e7940019ca19bfca5b4c06db6e96065eeb07245d
+
+define do_check_sha256
+if [ -x /bin/sha256 ]; then \
+ (cd $(DISTDIR) && \
+ echo "SHA256 (${1}) = ${SHA256_${1}}" | /bin/sha256 -C /dev/stdin "${1}"); \
+elif [ -x /usr/bin/sha256sum ]; then \
+ (cd $(DISTDIR) && \
+ echo "${SHA256_${1}} ${1}" | /usr/bin/sha256sum --quiet --check); \
+else \
+ echo "Error: No tool found to verify checksum"; \
+ false; \
+fi
+endef
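
The do_check_sha256 macro uses whichever checksum tool the host provides (BSD's
/bin/sha256 or GNU coreutils' sha256sum) and deliberately fails the build when
neither is found. On a GNU system, a call such as
$(call do_check_sha256,$(BDB_DIST)) runs, roughly:

    (cd $(DISTDIR) && \
     echo "f14fd96dd38915a1d63dcb94a63fbb8092334ceba6b5060760427096f631263e  db-4.7.25.tar.gz" \
       | /usr/bin/sha256sum --quiet --check)

(sha256sum's check format expects two separator characters, two spaces or a space
plus '*', between the digest and the filename.)
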
DISTFILES = $(DISTDIR)/$(NEON_DIST) \
$(DISTDIR)/$(SERF_DIST) \
@@ -105,28 +137,29 @@ DISTFILES = $(DISTDIR)/$(NEON_DIST) \
$(DISTDIR)/$(RUBY_DIST) \
$(DISTDIR)/$(BZ2_DIST) \
$(DISTDIR)/$(PYTHON_DIST) \
- $(DISTDIR)/$(JUNIT_DIST)
+ $(DISTDIR)/$(JUNIT_DIST) \
+ $(DISTDIR)/$(GETTEXT_DIST)
FETCH_CMD = wget -c
SUBVERSION_REPOS_URL = https://svn.apache.org/repos/asf/subversion
-BDB_URL = http://ftp2.de.freebsd.org/pub/FreeBSD/distfiles/bdb/$(BDB_DIST)
-APR_URL = http://svn.apache.org/repos/asf/apr/apr
-APR_ICONV_URL = http://www.apache.org/dist/apr/$(APR_ICONV_DIST)
-GNU_ICONV_URL = http://ftp.gnu.org/pub/gnu/libiconv/$(GNU_ICONV_DIST)
-APR_UTIL_URL = http://svn.apache.org/repos/asf/apr/apr-util
-HTTPD_URL = http://archive.apache.org/dist/httpd/$(HTTPD_DIST)
+BDB_URL = http://download.oracle.com/berkeley-db/$(BDB_DIST)
+APR_URL = https://svn.apache.org/repos/asf/apr/apr
+APR_ICONV_URL = https://www.apache.org/dist/apr/$(APR_ICONV_DIST)
+GNU_ICONV_URL = https://ftp.gnu.org/pub/gnu/libiconv/$(GNU_ICONV_DIST)
+APR_UTIL_URL = https://svn.apache.org/repos/asf/apr/apr-util
+HTTPD_URL = https://archive.apache.org/dist/httpd/$(HTTPD_DIST)
NEON_URL = http://webdav.org/neon/$(NEON_DIST)
-#SERF_URL = http://serf.googlecode.com/files/$(SERF_DIST)
-SERF_URL = http://serf.googlecode.com/svn/tags/$(SERF_VER)
-SERF_OLD_URL = http://serf.googlecode.com/svn/tags/$(SERF_OLD_VER)
-SQLITE_URL = http://www.sqlite.org/2013/$(SQLITE_DIST)
+SERF_URL = https://svn.apache.org/repos/asf/serf/tags/$(SERF_VER)
+SERF_OLD_URL = https://svn.apache.org/repos/asf/serf/tags/$(SERF_OLD_VER)
+SQLITE_URL = https://www.sqlite.org/2014/$(SQLITE_DIST)
CYRUS_SASL_URL = ftp://ftp.andrew.cmu.edu/pub/cyrus-mail/$(CYRUS_SASL_DIST)
LIBMAGIC_URL = ftp://ftp.astron.com/pub/file/$(LIBMAGIC_DIST)
RUBY_URL = http://ftp.ruby-lang.org/pub/ruby/1.8/$(RUBY_DIST)
BZ2_URL = http://bzip.org/$(BZ2_VER)/$(BZ2_DIST)
-PYTHON_URL = http://python.org/ftp/python/$(PYTHON_VER)/$(PYTHON_DIST)
-JUNIT_URL = http://cloud.github.com/downloads/KentBeck/junit/$(JUNIT_DIST)
+PYTHON_URL = https://python.org/ftp/python/$(PYTHON_VER)/$(PYTHON_DIST)
+JUNIT_URL = https://downloads.sourceforge.net/project/junit/junit/$(JUNIT_VER)/$(JUNIT_DIST)
+GETTEXT_URL = https://ftp.gnu.org/pub/gnu/gettext/$(GETTEXT_DIST)
BDB_SRCDIR = $(SRCDIR)/db-$(BDB_VER)
@@ -144,6 +177,7 @@ LIBMAGIC_SRCDIR = $(SRCDIR)/file-$(LIBMAGIC_VER)
RUBY_SRCDIR = $(SRCDIR)/ruby-$(RUBY_VER)
BZ2_SRCDIR = $(SRCDIR)/bzip2-$(BZ2_VER)
PYTHON_SRCDIR = $(SRCDIR)/Python-$(PYTHON_VER)
+GETTEXT_SRCDIR = $(SRCDIR)/gettext-$(GETTEXT_VER)
SVN_SRCDIR = $(SVN_WC)
BDB_OBJDIR = $(OBJDIR)/db-$(BDB_VER)
@@ -161,6 +195,7 @@ LIBMAGIC_OBJDIR = $(OBJDIR)/file-$(LIBMAGIC_VER)
RUBY_OBJDIR = $(OBJDIR)/ruby-$(RUBY_VER)
BZ2_OBJDIR = $(OBJDIR)/bzip2-$(BZ2_VER)
PYTHON_OBJDIR = $(OBJDIR)/python-$(PYTHON_VER)
+GETTEXT_OBJDIR = $(OBJDIR)/gettext-$(GETTEXT_VER)
SVN_OBJDIR = $(OBJDIR)/$(SVN_REL_WC)
# Tweak this for out-of-tree builds. Note that running individual
@@ -173,30 +208,30 @@ PROFILE_CFLAGS=-pg
endif
# We need this to make sure some targets below pick up the right libraries
-LD_LIBRARY_PATH=$(PREFIX)/apr/lib:$(PREFIX)/iconv/lib:$(PREFIX)/bdb/lib:$(PREFIX)/neon/lib:$(PREFIX)/serf/lib:$(PREFIX)/sqlite/lib:$(PREFIX)/cyrus-sasl/lib:$(PREFIX)/iconv/lib:$(PREFIX)/libmagic/lib:$(PREFIX)/ruby/lib:$(PREFIX)/python/lib:$(PREFIX)/svn-$(WC)/lib
+LD_LIBRARY_PATH=$(PREFIX)/apr/lib:$(PREFIX)/gettext/lib:$(PREFIX)/iconv/lib:$(PREFIX)/bdb/lib:$(PREFIX)/neon/lib:$(PREFIX)/serf/lib:$(PREFIX)/sqlite/lib:$(PREFIX)/cyrus-sasl/lib:$(PREFIX)/iconv/lib:$(PREFIX)/libmagic/lib:$(PREFIX)/ruby/lib:$(PREFIX)/python/lib:$(PREFIX)/svn-$(WC)/lib
#######################################################################
# Main targets.
#######################################################################
-.PHONY: all reset clean nuke
+.PHONY: all reset clean nuke fetch
all: dirs-create bdb-install apr-install iconv-install apr-util-install \
httpd-install neon-install serf-install serf-old-install \
sqlite-install cyrus-sasl-install libmagic-install \
- ruby-install bz2-install python-install \
+ ruby-install bz2-install python-install gettext-install \
svn-install svn-bindings-install
# Use these to start a build from the beginning.
reset: dirs-reset bdb-reset apr-reset iconv-reset apr-util-reset \
httpd-reset neon-reset serf-reset serf-old-reset sqlite-reset \
cyrus-sasl-reset libmagic-reset ruby-reset python-reset \
- bz2-reset svn-reset
+ bz2-reset gettext-reset svn-reset
# Use to save disk space.
clean: bdb-clean apr-clean iconv-clean apr-util-clean httpd-clean \
neon-clean serf-clean serf-old-clean sqlite-clean cyrus-sasl-clean \
- libmagic-clean ruby-clean bz2-clean python-clean svn-clean
+ libmagic-clean ruby-clean bz2-clean python-clean gettext-clean svn-clean
# Nukes everything (including installed binaries!)
# Use this to start ALL OVER AGAIN! Use with caution!
@@ -222,6 +257,8 @@ nuke:
;; \
esac
+fetch: $(DISTFILES)
+
#######################################################################
# directories
#######################################################################
@@ -248,7 +285,7 @@ bdb-reset:
rm -f $(BDB_OBJDIR)/$(f);)
bdb-clean:
- -(cd $(BDB_SRCDIR)/build_unix/ && make clean)
+ -(cd $(BDB_SRCDIR)/build_unix/ && env MAKEFLAGS= make clean)
# fetch distfile for bdb
$(DISTDIR)/$(BDB_DIST):
@@ -256,6 +293,7 @@ $(DISTDIR)/$(BDB_DIST):
# retrieve bdb
$(BDB_OBJDIR)/.retrieved: $(DISTDIR)/$(BDB_DIST)
+ $(call do_check_sha256,$(BDB_DIST))
[ -d $(BDB_OBJDIR) ] || mkdir -p $(BDB_OBJDIR)
tar -C $(SRCDIR) -zxf $(DISTDIR)/$(BDB_DIST)
touch $@
@@ -271,12 +309,12 @@ $(BDB_OBJDIR)/.configured: $(BDB_OBJDIR)/.retrieved
# compile bdb
$(BDB_OBJDIR)/.compiled: $(BDB_OBJDIR)/.configured
- (cd $(BDB_SRCDIR)/build_unix && make)
+ (cd $(BDB_SRCDIR)/build_unix && env MAKEFLAGS= make)
touch $@
# install bdb
$(BDB_OBJDIR)/.installed: $(BDB_OBJDIR)/.compiled
- (cd $(BDB_SRCDIR)/build_unix && make install)
+ (cd $(BDB_SRCDIR)/build_unix && env MAKEFLAGS= make install)
touch $@
#######################################################################
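
A note on the recurring `env MAKEFLAGS= make` idiom this change introduces: when
this Makefile is itself driven by make, the parent exports its command-line flags
to children through the MAKEFLAGS environment variable, and flags meant for the
top-level build (parallelism, BSD make options) are not necessarily understood by
the third-party packages' own build systems. Clearing the variable presumably
keeps those flags from leaking into each sub-build:

    make -j4 all           # the parent's "-j4" lands in $MAKEFLAGS of every child
    env MAKEFLAGS= make    # a sub-build invoked this way starts with MAKEFLAGS empty
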
@@ -292,7 +330,7 @@ apr-reset:
rm -f $(APR_OBJDIR)/$(f);)
apr-clean:
- -(cd $(APR_OBJDIR) && make clean)
+ -(cd $(APR_OBJDIR) && env MAKEFLAGS= make clean)
# retrieve apr if not present yet
$(APR_OBJDIR)/.retrieved:
@@ -310,10 +348,14 @@ endif
ifdef POOL_DEBUG
POOL_DEBUG_FLAG=--enable-pool-debug=all
+else
+# Map apr_palloc()/apr_pool_{clear,destroy}() to malloc()/free().
+# This also puts poison bytes into freed memory to help detect use after free.
+POOL_DEBUG_FLAG=--enable-pool-debug=yes
endif
# configure apr
-$(APR_OBJDIR)/.configured: $(APR_OBJDIR)/.retrieved
+$(APR_OBJDIR)/.configured: $(APR_OBJDIR)/.retrieved $(BDB_OBJDIR)/.installed
cd $(APR_SRCDIR) && ./buildconf
cd $(APR_OBJDIR) \
&& env CFLAGS="-O0 -g $(PROFILE_CFLAGS)" GREP="`which grep`" \
@@ -326,12 +368,12 @@ $(APR_OBJDIR)/.configured: $(APR_OBJDIR)/.retrieved
# compile apr
$(APR_OBJDIR)/.compiled: $(APR_OBJDIR)/.configured
- (cd $(APR_OBJDIR) && make)
+ (cd $(APR_OBJDIR) && env MAKEFLAGS= make)
touch $@
# install apr
$(APR_OBJDIR)/.installed: $(APR_OBJDIR)/.compiled
- (cd $(APR_OBJDIR) && make install)
+ (cd $(APR_OBJDIR) && env MAKEFLAGS= make install)
touch $@
#######################################################################
@@ -347,7 +389,7 @@ apr-iconv-reset:
rm -f $(APR_ICONV_OBJDIR)/$(f);)
apr-iconv-clean:
- -(cd $(APR_ICONV_OBJDIR) && make clean)
+ -(cd $(APR_ICONV_OBJDIR) && env MAKEFLAGS= make clean)
# fetch distfile for apr-iconv
$(DISTDIR)/$(APR_ICONV_DIST):
@@ -355,14 +397,17 @@ $(DISTDIR)/$(APR_ICONV_DIST):
# retrieve apr-iconv
$(APR_ICONV_OBJDIR)/.retrieved: $(DISTDIR)/$(APR_ICONV_DIST)
+ $(call do_check_sha256,$(APR_ICONV_DIST))
[ -d $(APR_ICONV_OBJDIR) ] || mkdir -p $(APR_ICONV_OBJDIR)
tar -C $(SRCDIR) -zxf $(DISTDIR)/$(APR_ICONV_DIST)
touch $@
# configure apr-iconv
-$(APR_ICONV_OBJDIR)/.configured: $(APR_ICONV_OBJDIR)/.retrieved
+$(APR_ICONV_OBJDIR)/.configured: $(APR_ICONV_OBJDIR)/.retrieved \
+ $(APR_OBJDIR)/.installed
cd $(APR_ICONV_OBJDIR) \
- && env CFLAGS="-g $(PROFILE_CFLAGS)" GREP="`which grep`" \
+ && env CFLAGS="-g $(PROFILE_CFLAGS) -DAPR_POOL_DEBUG" \
+ GREP="`which grep`" \
$(APR_ICONV_SRCDIR)/configure \
--prefix=$(PREFIX)/apr \
--with-apr=$(PREFIX)/apr
@@ -371,12 +416,12 @@ $(APR_ICONV_OBJDIR)/.configured: $(APR_ICONV_OBJDIR)/.retrieved
# compile apr-iconv
$(APR_ICONV_OBJDIR)/.compiled: $(APR_ICONV_OBJDIR)/.configured
(cd $(APR_ICONV_OBJDIR) \
- && make CPPFLAGS="-D_OSD_POSIX" CFLAGS="-g -O0 $(PROFILE_CFLAGS)")
+ && env MAKEFLAGS= make CPPFLAGS="-D_OSD_POSIX" CFLAGS="-g -O0 $(PROFILE_CFLAGS)")
touch $@
# install apr-iconv
$(APR_ICONV_OBJDIR)/.installed: $(APR_ICONV_OBJDIR)/.compiled
- (cd $(APR_ICONV_OBJDIR) && make install)
+ (cd $(APR_ICONV_OBJDIR) && env MAKEFLAGS= make install)
touch $@
#######################################################################
@@ -392,7 +437,7 @@ gnu-iconv-reset:
rm -f $(GNU_ICONV_OBJDIR)/$(f);)
gnu-iconv-clean:
- -(cd $(GNU_ICONV_OBJDIR) && make clean)
+ -(cd $(GNU_ICONV_OBJDIR) && env MAKEFLAGS= make clean)
rm -f $(GNU_ICONV_OBJDIR)/lib_encodings.def.diff
rm -f $(GNU_ICONV_OBJDIR)/lib_aliases.gperf.diff
@@ -433,6 +478,7 @@ $(GNU_ICONV_OBJDIR)/lib_aliases.gperf.diff:
$(GNU_ICONV_OBJDIR)/.retrieved: $(DISTDIR)/$(GNU_ICONV_DIST) \
$(GNU_ICONV_OBJDIR)/lib_encodings.def.diff \
$(GNU_ICONV_OBJDIR)/lib_aliases.gperf.diff
+ $(call do_check_sha256,$(GNU_ICONV_DIST))
tar -C $(SRCDIR) -zxf $(DISTDIR)/$(GNU_ICONV_DIST)
cd $(SRCDIR)/libiconv-$(GNU_ICONV_VER) && \
patch -p0 < $(GNU_ICONV_OBJDIR)/lib_encodings.def.diff && \
@@ -452,12 +498,12 @@ $(GNU_ICONV_OBJDIR)/.configured: $(GNU_ICONV_OBJDIR)/.retrieved
# compile gnu-iconv
$(GNU_ICONV_OBJDIR)/.compiled: $(GNU_ICONV_OBJDIR)/.configured
- (cd $(GNU_ICONV_OBJDIR) && make)
+ (cd $(GNU_ICONV_OBJDIR) && env MAKEFLAGS= make)
touch $@
# install gnu-iconv
$(GNU_ICONV_OBJDIR)/.installed: $(GNU_ICONV_OBJDIR)/.compiled
- (cd $(GNU_ICONV_OBJDIR) && make install)
+ (cd $(GNU_ICONV_OBJDIR) && env MAKEFLAGS= make install)
touch $@
#######################################################################
@@ -489,7 +535,7 @@ apr-util-reset:
rm -f $(APR_UTIL_OBJDIR)/$(f);)
apr-util-clean:
- -(cd $(APR_UTIL_OBJDIR) && make clean)
+ -(cd $(APR_UTIL_OBJDIR) && env MAKEFLAGS= make clean)
# retrieve apr-util if not present yet
@@ -503,16 +549,19 @@ $(APR_UTIL_OBJDIR)/.retrieved:
ifeq ($(USE_APR_ICONV),yes)
ICONV_FLAG=--with-iconv=$(PREFIX)/apr
+ICONV_OBJDIR=$(APR_ICONV_OBJDIR)
else
ICONV_FLAG=--with-iconv=$(PREFIX)/iconv
+ICONV_OBJDIR=$(GNU_ICONV_OBJDIR)
endif
# configure apr-util
-$(APR_UTIL_OBJDIR)/.configured: $(APR_UTIL_OBJDIR)/.retrieved
+$(APR_UTIL_OBJDIR)/.configured: $(APR_UTIL_OBJDIR)/.retrieved \
+ $(APR_OBJDIR)/.installed $(ICONV_OBJDIR)/.installed
cd $(APR_UTIL_SRCDIR) && ./buildconf --with-apr=$(APR_SRCDIR)
cd $(APR_UTIL_OBJDIR) \
&& env LD_LIBRARY_PATH=$(PREFIX)/bdb/lib \
- CFLAGS="-O0 -g $(PROFILE_CFLAGS)" \
+ CFLAGS="-O0 -g $(PROFILE_CFLAGS) -DAPR_POOL_DEBUG" \
GREP="`which grep`" \
$(APR_UTIL_SRCDIR)/configure \
--prefix=$(PREFIX)/apr \
@@ -524,12 +573,12 @@ $(APR_UTIL_OBJDIR)/.configured: $(APR_UTIL_OBJDIR)/.retrieved
# compile apr-util
$(APR_UTIL_OBJDIR)/.compiled: $(APR_UTIL_OBJDIR)/.configured
- (cd $(APR_UTIL_OBJDIR) && make)
+ (cd $(APR_UTIL_OBJDIR) && env MAKEFLAGS= make)
touch $@
# install apr-util
$(APR_UTIL_OBJDIR)/.installed: $(APR_UTIL_OBJDIR)/.compiled
- (cd $(APR_UTIL_OBJDIR) && make install)
+ (cd $(APR_UTIL_OBJDIR) && env MAKEFLAGS= make install)
touch $@
#######################################################################
@@ -546,22 +595,64 @@ httpd-reset:
rm -f $(HTTPD_OBJDIR)/$(f);)
httpd-clean:
- -(cd $(HTTPD_OBJDIR) && make clean)
+ -(cd $(HTTPD_OBJDIR) && env MAKEFLAGS= make clean)
# fetch distfile for httpd
$(DISTDIR)/$(HTTPD_DIST):
cd $(DISTDIR) && $(FETCH_CMD) $(HTTPD_URL)
+$(HTTPD_OBJDIR)/chil-engine.diff:
+ mkdir -p $(dir $@)
+ echo > $@.tmp '--- modules/ssl/ssl_engine_init.c.orig Mon Apr 14 13:20:57 2014'
+ echo >>$@.tmp '+++ modules/ssl/ssl_engine_init.c Mon Apr 14 13:21:22 2014'
+ echo >>$@.tmp '@@ -406,9 +406,11 @@ void ssl_init_Engine(server_rec *s, apr_pool_t *p)'
+ echo >>$@.tmp ' ssl_die();'
+ echo >>$@.tmp ' }'
+ echo >>$@.tmp ' '
+ echo >>$@.tmp '+#ifdef ENGINE_CTRL_CHIL_SET_FORKCHECK'
+ echo >>$@.tmp ' if (strEQ(mc->szCryptoDevice, "chil")) {'
+ echo >>$@.tmp ' ENGINE_ctrl(e, ENGINE_CTRL_CHIL_SET_FORKCHECK, 1, 0, 0);'
+ echo >>$@.tmp ' }'
+ echo >>$@.tmp '+#endif'
+ echo >>$@.tmp ' '
+ echo >>$@.tmp ' if (!ENGINE_set_default(e, ENGINE_METHOD_ALL)) {'
+ echo >>$@.tmp ' ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,'
+ mv -f $@.tmp $@
+
+
# retrieve httpd
-$(HTTPD_OBJDIR)/.retrieved: $(DISTDIR)/$(HTTPD_DIST)
+$(HTTPD_OBJDIR)/.retrieved: $(DISTDIR)/$(HTTPD_DIST) \
+ $(HTTPD_OBJDIR)/chil-engine.diff
+ $(call do_check_sha256,$(HTTPD_DIST))
[ -d $(HTTPD_OBJDIR) ] || mkdir -p $(HTTPD_OBJDIR)
- tar -C $(SRCDIR) -jxf $(DISTDIR)/$(HTTPD_DIST)
+ tar -C $(SRCDIR) -zxf $(DISTDIR)/$(HTTPD_DIST)
+ cd $(HTTPD_SRCDIR) && patch -p0 < $(HTTPD_OBJDIR)/chil-engine.diff
+ cp $(HTTPD_SRCDIR)/modules/ssl/ssl_toolkit_compat.h \
+ $(HTTPD_SRCDIR)/modules/ssl/ssl_toolkit_compat.h.orig
+ sed '/^#define HAVE_SSL_RAND_EGD/d' \
+ < $(HTTPD_SRCDIR)/modules/ssl/ssl_toolkit_compat.h.orig \
+ > $(HTTPD_SRCDIR)/modules/ssl/ssl_toolkit_compat.h
+ cp $(HTTPD_SRCDIR)/modules/ssl/ssl_engine_vars.c \
+ $(HTTPD_SRCDIR)/modules/ssl/ssl_engine_vars.c.orig
+ sed 's/^\(#if (OPENSSL_VERSION_NUMBER >= 0x00908000)\)$$/\1 \&\& !defined(OPENSSL_NO_COMP)/' \
+ < $(HTTPD_SRCDIR)/modules/ssl/ssl_engine_vars.c.orig \
+ > $(HTTPD_SRCDIR)/modules/ssl/ssl_engine_vars.c
+ cp $(HTTPD_SRCDIR)/modules/ssl/ssl_engine_init.c \
+ $(HTTPD_SRCDIR)/modules/ssl/ssl_engine_init.c.orig
+ $(foreach f, ssl_engine_init.c ssl_util_ssl.c ssl_util_ssl.h, \
+ cp $(HTTPD_SRCDIR)/modules/ssl/${f} $(HTTPD_SRCDIR)/modules/ssl/${f}.orig; \
+ sed 's/SSL_CTX_use_certificate_chain/_SSL_CTX_use_certificate_chain/' \
+ < $(HTTPD_SRCDIR)/modules/ssl/${f}.orig \
+ > $(HTTPD_SRCDIR)/modules/ssl/${f};\
+ )
touch $@
# configure httpd
-$(HTTPD_OBJDIR)/.configured: $(HTTPD_OBJDIR)/.retrieved
+$(HTTPD_OBJDIR)/.configured: $(HTTPD_OBJDIR)/.retrieved \
+ $(APR_UTIL_OBJDIR)/.installed
cd $(HTTPD_OBJDIR) \
- && env CFLAGS="-g $(PROFILE_CFLAGS)" GREP="`which grep`" \
+ && env CFLAGS="-g $(PROFILE_CFLAGS) -DAPR_POOL_DEBUG" \
+ GREP="`which grep`" \
$(HTTPD_SRCDIR)/configure \
--prefix=$(PREFIX)/httpd \
--enable-maintainer-mode \
@@ -574,12 +665,12 @@ $(HTTPD_OBJDIR)/.configured: $(HTTPD_OBJDIR)/.retrieved
# compile httpd
$(HTTPD_OBJDIR)/.compiled: $(HTTPD_OBJDIR)/.configured
- (cd $(HTTPD_OBJDIR) && make)
+ (cd $(HTTPD_OBJDIR) && env MAKEFLAGS= make)
touch $@
# install httpd
$(HTTPD_OBJDIR)/.installed: $(HTTPD_OBJDIR)/.compiled
- (cd $(HTTPD_OBJDIR) && make install)
+ (cd $(HTTPD_OBJDIR) && env MAKEFLAGS= make install)
touch $@
# create a httpd.conf for mod_dav_svn
@@ -617,26 +708,17 @@ neon-reset:
rm -f $(NEON_OBJDIR)/$(f);)
neon-clean:
- -(cd $(NEON_OBJDIR) && make clean)
+ -(cd $(NEON_OBJDIR) && env MAKEFLAGS= make clean)
# fetch distfile for neon
$(DISTDIR)/$(NEON_DIST):
cd $(DISTDIR) && $(FETCH_CMD) $(NEON_URL)
# retrieve neon
-NEON_SVN_URL=http://svn.webdav.org/repos/projects/neon/trunk
$(NEON_OBJDIR)/.retrieved: $(DISTDIR)/$(NEON_DIST)
+ $(call do_check_sha256,$(NEON_DIST))
[ -d $(NEON_OBJDIR) ] || mkdir -p $(NEON_OBJDIR)
tar -C $(SRCDIR) -zxf $(DISTDIR)/$(NEON_DIST)
- # fix build with OpenSSL lacking SSLv2 support:
- cd $(NEON_SRCDIR)/src && svn diff -c 1865 \
- $(NEON_SVN_URL)/src/ne_openssl.c | patch -p0
- cd $(NEON_SRCDIR)/src && svn diff -c 1872 \
- $(NEON_SVN_URL)/src/ne_openssl.c | patch -p0
- cd $(NEON_SRCDIR)/src && svn diff -c 1865 \
- $(NEON_SVN_URL)/src/ne_ssl.h | patch -p0
- cd $(NEON_SRCDIR)/src && svn diff -c 1865 \
- $(NEON_SVN_URL)/src/ne_session.c | patch -p0
touch $@
# OpenBSD does not have krb5-config in PATH, but the neon port has
@@ -664,12 +746,12 @@ $(NEON_OBJDIR)/.configured: $(NEON_OBJDIR)/.retrieved
# compile neon
$(NEON_OBJDIR)/.compiled: $(NEON_OBJDIR)/.configured
- (cd $(NEON_OBJDIR) && make)
+ (cd $(NEON_OBJDIR) && env MAKEFLAGS= make)
touch $@
# install neon
$(NEON_OBJDIR)/.installed: $(NEON_OBJDIR)/.compiled
- (cd $(NEON_OBJDIR) && make install)
+ (cd $(NEON_OBJDIR) && env MAKEFLAGS= make install)
touch $@
@@ -686,7 +768,7 @@ serf-reset:
rm -f $(SERF_OBJDIR)/$(f);)
serf-clean:
- -(cd $(SERF_SRCDIR) && ./serfmake clean)
+ -(cd $(SERF_SRCDIR) && scons -c)
# fetch distfile for serf
@@ -710,21 +792,24 @@ $(SERF_OBJDIR)/.retrieved:
touch $@
# compile serf (serf won't compile outside its source tree)
-$(SERF_OBJDIR)/.compiled: $(SERF_OBJDIR)/.retrieved
+$(SERF_OBJDIR)/.compiled: $(SERF_OBJDIR)/.retrieved \
+ $(APR_UTIL_OBJDIR)/.installed
cd $(SERF_SRCDIR) && \
- env CFLAGS="-O0 -g $(PROFILE_CFLAGS)" \
- ./serfmake --with-apr=$(PREFIX)/apr \
- --prefix=$(PREFIX)/serf \
- build
+ scons DEBUG=1 \
+ CFLAGS="-O0 -g $(PROFILE_CFLAGS) -DAPR_POOL_DEBUG" \
+ APR=$(PREFIX)/apr \
+ APU=$(PREFIX)/apr \
+ PREFIX=$(PREFIX)/serf
touch $@
# install serf
$(SERF_OBJDIR)/.installed: $(SERF_OBJDIR)/.compiled
+ rm -rf $(PREFIX)/serf # XXX scons cannot reinstall :(
cd $(SERF_SRCDIR) && \
- ./serfmake --with-apr=$(PREFIX)/apr \
- --with-apr-util=$(PREFIX)/apr \
- --prefix=$(PREFIX)/serf \
- install
+ scons install
+ # work around unportable scons shared lib support
+ -ln -s libserf-1.so.$(shell echo $(SERF_VER) | sed -e 's/[0-9]$$/0/') \
+ $(PREFIX)/serf/lib/libserf-1.so
touch $@
#######################################################################
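
The sed expression in the symlink recipe turns SERF_VER into the library's soname
version by replacing the final (patch) digit with zero; the doubled '$$' is just
Make's escape for a literal '$'. With SERF_VER = 1.3.8:

    $ echo 1.3.8 | sed -e 's/[0-9]$/0/'
    1.3.0
    # hence: $(PREFIX)/serf/lib/libserf-1.so -> libserf-1.so.1.3.0
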
@@ -751,9 +836,10 @@ $(SERF_OLD_OBJDIR)/.retrieved:
touch $@
# compile serf (serf won't compile outside its source tree)
-$(SERF_OLD_OBJDIR)/.compiled: $(SERF_OLD_OBJDIR)/.retrieved
+$(SERF_OLD_OBJDIR)/.compiled: $(SERF_OLD_OBJDIR)/.retrieved \
+ $(APR_UTIL_OBJDIR)/.installed
cd $(SERF_OLD_SRCDIR) && \
- env CFLAGS="-O0 -g $(PROFILE_CFLAGS)" \
+ env CFLAGS="-O0 -g $(PROFILE_CFLAGS) -DAPR_POOL_DEBUG" \
./serfmake --with-apr=$(PREFIX)/apr \
--prefix=$(PREFIX)/serf-old \
build
@@ -782,7 +868,7 @@ sqlite-reset:
rm -f $(SQLITE_OBJDIR)/$(f);)
sqlite-clean:
- -cd $(SQLITE_OBJDIR) && make clean
+ -cd $(SQLITE_OBJDIR) && env MAKEFLAGS= make clean
# fetch distfile for sqlite
$(DISTDIR)/$(SQLITE_DIST):
@@ -790,6 +876,7 @@ $(DISTDIR)/$(SQLITE_DIST):
# retrieve sqlite
$(SQLITE_OBJDIR)/.retrieved: $(DISTDIR)/$(SQLITE_DIST)
+ $(call do_check_sha256,$(SQLITE_DIST))
[ -d $(SQLITE_OBJDIR) ] || mkdir -p $(SQLITE_OBJDIR)
tar -C $(SRCDIR) -zxf $(DISTDIR)/$(SQLITE_DIST)
touch $@
@@ -811,12 +898,12 @@ $(SQLITE_OBJDIR)/.configured: $(SQLITE_OBJDIR)/.retrieved
# compile sqlite
$(SQLITE_OBJDIR)/.compiled: $(SQLITE_OBJDIR)/.configured
- (cd $(SQLITE_OBJDIR) && make)
+ (cd $(SQLITE_OBJDIR) && env MAKEFLAGS= make)
touch $@
# install sqlite
$(SQLITE_OBJDIR)/.installed: $(SQLITE_OBJDIR)/.compiled
- (cd $(SQLITE_OBJDIR) && make install)
+ (cd $(SQLITE_OBJDIR) && env MAKEFLAGS= make install)
touch $@
#######################################################################
@@ -832,7 +919,7 @@ cyrus-sasl-reset:
rm -f $(CYRUS_SASL_OBJDIR)/$(f);)
cyrus-sasl-clean:
- -(cd $(CYRUS_SASL_OBJDIR) && make distclean)
+ -(cd $(CYRUS_SASL_OBJDIR) && env MAKEFLAGS= make distclean)
# fetch distfile for cyrus-sasl
$(DISTDIR)/$(CYRUS_SASL_DIST):
@@ -840,6 +927,7 @@ $(DISTDIR)/$(CYRUS_SASL_DIST):
# retrieve cyrus-sasl
$(CYRUS_SASL_OBJDIR)/.retrieved: $(DISTDIR)/$(CYRUS_SASL_DIST)
+ $(call do_check_sha256,$(CYRUS_SASL_DIST))
[ -d $(CYRUS_SASL_OBJDIR) ] || mkdir -p $(CYRUS_SASL_OBJDIR)
tar -C $(SRCDIR) -zxf $(DISTDIR)/$(CYRUS_SASL_DIST)
# fixes build on Debian:
@@ -867,7 +955,8 @@ endif
touch $@
# configure cyrus-sasl
-$(CYRUS_SASL_OBJDIR)/.configured: $(CYRUS_SASL_OBJDIR)/.retrieved
+$(CYRUS_SASL_OBJDIR)/.configured: $(CYRUS_SASL_OBJDIR)/.retrieved \
+ $(BDB_OBJDIR)/.installed $(SQLITE_OBJDIR)/.installed
cd $(CYRUS_SASL_OBJDIR) \
&& env CFLAGS="-g $(PROFILE_CFLAGS)" \
CPPFLAGS="-I/usr/include/kerberosV" \
@@ -885,12 +974,12 @@ $(CYRUS_SASL_OBJDIR)/.configured: $(CYRUS_SASL_OBJDIR)/.retrieved
# compile cyrus-sasl
$(CYRUS_SASL_OBJDIR)/.compiled: $(CYRUS_SASL_OBJDIR)/.configured
- (cd $(CYRUS_SASL_OBJDIR) && make)
+ (cd $(CYRUS_SASL_OBJDIR) && env MAKEFLAGS= make)
touch $@
# install cyrus-sasl
$(CYRUS_SASL_OBJDIR)/.installed: $(CYRUS_SASL_OBJDIR)/.compiled
- (cd $(CYRUS_SASL_OBJDIR) && make install)
+ (cd $(CYRUS_SASL_OBJDIR) && env MAKEFLAGS= make install)
touch $@
#######################################################################
@@ -906,7 +995,7 @@ libmagic-reset:
rm -f $(LIBMAGIC_OBJDIR)/$(f);)
libmagic-clean:
- -(cd $(LIBMAGIC_OBJDIR) && make distclean)
+ -(cd $(LIBMAGIC_OBJDIR) && env MAKEFLAGS= make distclean)
# fetch distfile for libmagic
$(DISTDIR)/$(LIBMAGIC_DIST):
@@ -914,6 +1003,7 @@ $(DISTDIR)/$(LIBMAGIC_DIST):
# retrieve libmagic
$(LIBMAGIC_OBJDIR)/.retrieved: $(DISTDIR)/$(LIBMAGIC_DIST)
+ $(call do_check_sha256,$(LIBMAGIC_DIST))
[ -d $(LIBMAGIC_OBJDIR) ] || mkdir -p $(LIBMAGIC_OBJDIR)
tar -C $(SRCDIR) -zxf $(DISTDIR)/$(LIBMAGIC_DIST)
touch $@
@@ -929,12 +1019,12 @@ $(LIBMAGIC_OBJDIR)/.configured: $(LIBMAGIC_OBJDIR)/.retrieved
# compile libmagic
$(LIBMAGIC_OBJDIR)/.compiled: $(LIBMAGIC_OBJDIR)/.configured
- (cd $(LIBMAGIC_OBJDIR) && make)
+ (cd $(LIBMAGIC_OBJDIR) && env MAKEFLAGS= make)
touch $@
# install libmagic
$(LIBMAGIC_OBJDIR)/.installed: $(LIBMAGIC_OBJDIR)/.compiled
- (cd $(LIBMAGIC_OBJDIR) && make install)
+ (cd $(LIBMAGIC_OBJDIR) && env MAKEFLAGS= make install)
touch $@
#######################################################################
@@ -950,7 +1040,7 @@ ruby-reset:
rm -f $(RUBY_OBJDIR)/$(f);)
ruby-clean:
- -(cd $(RUBY_OBJDIR) && make distclean)
+ -(cd $(RUBY_OBJDIR) && env MAKEFLAGS= make distclean)
# fetch distfile for ruby
$(DISTDIR)/$(RUBY_DIST):
@@ -959,6 +1049,7 @@ $(DISTDIR)/$(RUBY_DIST):
# retrieve ruby
#
$(RUBY_OBJDIR)/.retrieved: $(DISTDIR)/$(RUBY_DIST)
+ $(call do_check_sha256,$(RUBY_DIST))
[ -d $(RUBY_OBJDIR) ] || mkdir -p $(RUBY_OBJDIR)
tar -C $(SRCDIR) -zxf $(DISTDIR)/$(RUBY_DIST)
touch $@
@@ -981,12 +1072,12 @@ $(RUBY_OBJDIR)/.configured: $(RUBY_OBJDIR)/.retrieved
# compile ruby
$(RUBY_OBJDIR)/.compiled: $(RUBY_OBJDIR)/.configured
- (cd $(RUBY_OBJDIR) && make)
+ (cd $(RUBY_OBJDIR) && env MAKEFLAGS= make)
touch $@
# install ruby
$(RUBY_OBJDIR)/.installed: $(RUBY_OBJDIR)/.compiled
- (cd $(RUBY_OBJDIR) && make install)
+ (cd $(RUBY_OBJDIR) && env MAKEFLAGS= make install)
touch $@
#######################################################################
@@ -1001,7 +1092,7 @@ bz2-reset:
rm -f $(BZ2_OBJDIR)/$(f);)
bz2-clean:
- -(cd $(BZ2_SRCDIR) && make distclean)
+ -(cd $(BZ2_SRCDIR) && env MAKEFLAGS= make distclean)
# fetch distfile for bz2
$(DISTDIR)/$(BZ2_DIST):
@@ -1009,18 +1100,19 @@ $(DISTDIR)/$(BZ2_DIST):
# retrieve bz2
$(BZ2_OBJDIR)/.retrieved: $(DISTDIR)/$(BZ2_DIST)
+ $(call do_check_sha256,$(BZ2_DIST))
[ -d $(BZ2_OBJDIR) ] || mkdir -p $(BZ2_OBJDIR)
tar -C $(SRCDIR) -zxf $(DISTDIR)/$(BZ2_DIST)
touch $@
# compile bz2
$(BZ2_OBJDIR)/.compiled: $(BZ2_OBJDIR)/.retrieved
- (cd $(BZ2_SRCDIR) && make CFLAGS="-g $(PROFILE_CFLAGS) -fPIC")
+ (cd $(BZ2_SRCDIR) && env MAKEFLAGS= make CFLAGS="-g $(PROFILE_CFLAGS) -fPIC")
touch $@
# install bz2
$(BZ2_OBJDIR)/.installed: $(BZ2_OBJDIR)/.compiled
- (cd $(BZ2_SRCDIR) && make install PREFIX=$(PREFIX)/bz2)
+ (cd $(BZ2_SRCDIR) && env MAKEFLAGS= make install PREFIX=$(PREFIX)/bz2)
touch $@
@@ -1037,7 +1129,7 @@ python-reset:
rm -f $(PYTHON_OBJDIR)/$(f);)
python-clean:
- -(cd $(PYTHON_OBJDIR) && make distclean)
+ -(cd $(PYTHON_OBJDIR) && env MAKEFLAGS= make distclean)
# fetch distfile for python
$(DISTDIR)/$(PYTHON_DIST):
@@ -1046,13 +1138,14 @@ $(DISTDIR)/$(PYTHON_DIST):
# retrieve python
#
$(PYTHON_OBJDIR)/.retrieved: $(DISTDIR)/$(PYTHON_DIST)
+ $(call do_check_sha256,$(PYTHON_DIST))
[ -d $(PYTHON_OBJDIR) ] || mkdir -p $(PYTHON_OBJDIR)
tar -C $(SRCDIR) -zxf $(DISTDIR)/$(PYTHON_DIST)
# Make setup.py use our own dependencies instead of system ones
sed -e "s#sqlite_inc_paths = \[ '/usr/include',#sqlite_inc_paths = [ '$(PREFIX)/sqlite/include',#" \
-e "s#'/usr/include/db4'#'$(PREFIX)/bdb/include'#" \
- -e "s|\(add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')\)|#\1|" \
- -e "s|\(add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')\)|#\1|" \
+ -e "s|\(add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')\)|pass #\1|" \
+ -e "s|\(add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')\)|pass #\1|" \
-e "s#find_library_file(lib_dirs, 'bz2'#find_library_file(['$(PREFIX)/bz2/lib'] + lib_dirs, 'bz2'#" \
< $(PYTHON_SRCDIR)/setup.py \
> $(PYTHON_SRCDIR)/setup.py.patched
@@ -1074,7 +1167,8 @@ $(PYTHON_OBJDIR)/.retrieved: $(DISTDIR)/$(PYTHON_DIST)
ifdef PROFILE
PYTHON_PROFILING=--enable-profiling
endif
-$(PYTHON_OBJDIR)/.configured: $(PYTHON_OBJDIR)/.retrieved
+$(PYTHON_OBJDIR)/.configured: $(PYTHON_OBJDIR)/.retrieved \
+ $(BZ2_OBJDIR)/.installed
cd $(PYTHON_OBJDIR) \
&& env CFLAGS="-g $(PROFILE_CFLAGS)" GREP="`which grep`" \
CPPFLAGS="-I$(PREFIX)/bz2/include" \
@@ -1089,12 +1183,12 @@ $(PYTHON_OBJDIR)/.configured: $(PYTHON_OBJDIR)/.retrieved
# compile python
$(PYTHON_OBJDIR)/.compiled: $(PYTHON_OBJDIR)/.configured
- (cd $(PYTHON_OBJDIR) && make)
+ (cd $(PYTHON_OBJDIR) && env MAKEFLAGS= make)
touch $@
# install python
$(PYTHON_OBJDIR)/.installed: $(PYTHON_OBJDIR)/.compiled
- (cd $(PYTHON_OBJDIR) && make install)
+ (cd $(PYTHON_OBJDIR) && env MAKEFLAGS= make install)
touch $@
@@ -1105,6 +1199,65 @@ $(PYTHON_OBJDIR)/.installed: $(PYTHON_OBJDIR)/.compiled
# fetch distfile for junit
$(DISTDIR)/$(JUNIT_DIST):
cd $(DISTDIR) && $(FETCH_CMD) $(JUNIT_URL)
+ $(call do_check_sha256,$(JUNIT_DIST))
+
+
+#######################################################################
+# gettext
+#######################################################################
+
+gettext-retrieve: $(GETTEXT_OBJDIR)/.retrieved
+gettext-configure: $(GETTEXT_OBJDIR)/.configured
+gettext-compile: $(GETTEXT_OBJDIR)/.compiled
+gettext-install: $(GETTEXT_OBJDIR)/.installed
+gettext-reset:
+ $(foreach f, .retrieved .configured .compiled .installed, \
+ rm -f $(GETTEXT_OBJDIR)/$(f);)
+
+gettext-clean:
+ -(cd $(GETTEXT_OBJDIR) && env MAKEFLAGS= make clean)
+
+# fetch distfile for gettext
+$(DISTDIR)/$(GETTEXT_DIST):
+ cd $(DISTDIR) && $(FETCH_CMD) $(GETTEXT_URL)
+
+# retrieve gettext
+$(GETTEXT_OBJDIR)/.retrieved: $(DISTDIR)/$(GETTEXT_DIST)
+ $(call do_check_sha256,$(GETTEXT_DIST))
+ [ -d $(GETTEXT_OBJDIR) ] || mkdir -p $(GETTEXT_OBJDIR)
+ tar -C $(SRCDIR) -zxf $(DISTDIR)/$(GETTEXT_DIST)
+ touch $@
+
+# (gettext won't compile outside its source tree)
+# configure gettext
+$(GETTEXT_OBJDIR)/.configured: $(GETTEXT_OBJDIR)/.retrieved
+ cd $(GETTEXT_SRCDIR) \
+ && env CFLAGS="-g $(PROFILE_CFLAGS)" GREP="`which grep`"\
+ LDFLAGS="-L$(PREFIX)/iconv/lib" \
+ $(GETTEXT_SRCDIR)/configure \
+ --prefix=$(PREFIX)/gettext \
+ --with-libiconv-prefix=$(PREFIX)/iconv \
+ --disable-c++ \
+ --disable-java \
+ --disable-csharp \
+ $(THREADS_FLAG)
+ -which gsed && \
+ sed -e 's/sed /gsed /g' < $(GETTEXT_SRCDIR)/build-aux/moopp \
+ > $(GETTEXT_SRCDIR)/build-aux/moopp.fixed && \
+ mv $(GETTEXT_SRCDIR)/build-aux/moopp.fixed \
+ $(GETTEXT_SRCDIR)/build-aux/moopp && \
+ chmod +x $(GETTEXT_SRCDIR)/build-aux/moopp
+ touch $@
+
+# compile gettext
+$(GETTEXT_OBJDIR)/.compiled: $(GETTEXT_OBJDIR)/.configured
+ (cd $(GETTEXT_SRCDIR) && env MAKEFLAGS= make)
+ touch $@
+
+# install gettext
+$(GETTEXT_OBJDIR)/.installed: $(GETTEXT_OBJDIR)/.compiled
+ (cd $(GETTEXT_SRCDIR) && env MAKEFLAGS= make install)
+ touch $@
#######################################################################
# svn
@@ -1129,7 +1282,7 @@ svn-reset: svn-bindings-reset
rm -f $(SVN_OBJDIR)/$(f);)
svn-clean:
- -(cd $(svn_builddir) && make distclean)
+ -(cd $(svn_builddir) && env MAKEFLAGS= make distclean)
# retrieve svn if not present yet
$(SVN_OBJDIR)/.retrieved:
@@ -1153,16 +1306,20 @@ $(SVN_OBJDIR)/.retrieved:
ifeq ($(BRANCH_MAJOR),1.7)
BDB_FLAG=db.h:$(PREFIX)/bdb/include:$(PREFIX)/bdb/lib:db-$(BDB_MAJOR_VER)
SERF_FLAG=--with-serf="$(PREFIX)/serf"
+SERF_LDFLAG=-Wl,-rpath,$(PREFIX)/serf/lib -Wl,-rpath,$(PREFIX)/bdb/lib
MOD_DAV_SVN=modules/svn-$(WC)/mod_dav_svn.so
MOD_AUTHZ_SVN=modules/svn-$(WC)/mod_authz_svn.so
+MOD_DONTDOTHAT=modules/svn-$(WC)/mod_dontdothat.so
LIBMAGIC_FLAG=--with-libmagic=$(PREFIX)/libmagic
NEON_FLAG=--with-neon="$(PREFIX)/neon"
JAVAHL_CHECK_TARGET=check-javahl
else ifeq ($(BRANCH_MAJOR),1.6)
BDB_FLAG=db.h:$(PREFIX)/bdb/include:$(PREFIX)/bdb/lib:db-$(BDB_MAJOR_VER)
SERF_FLAG=--with-serf="$(PREFIX)/serf"
+SERF_LDFLAG=-Wl,-rpath,$(PREFIX)/serf/lib -Wl,-rpath,$(PREFIX)/bdb/lib
MOD_DAV_SVN=modules/svn-$(WC)/mod_dav_svn.so
MOD_AUTHZ_SVN=modules/svn-$(WC)/mod_authz_svn.so
+MOD_DONTDOTHAT=modules/svn-$(WC)/mod_dontdothat.so
W_NO_SYSTEM_HEADERS=-Wno-system-headers
NEON_FLAG=--with-neon="$(PREFIX)/neon"
JAVAHL_CHECK_TARGET=check-javahl
@@ -1171,6 +1328,7 @@ BDB_FLAG=$(PREFIX)/bdb
SERF_FLAG=--with-serf="$(PREFIX)/serf-old"
MOD_DAV_SVN=modules/mod_dav_svn.so
MOD_AUTHZ_SVN=modules/mod_authz_svn.so
+MOD_DONTDOTHAT=modules/mod_dontdothat.so
DISABLE_NEON_VERSION_CHECK=--disable-neon-version-check
W_NO_SYSTEM_HEADERS=-Wno-system-headers
NEON_FLAG=--with-neon="$(PREFIX)/neon"
@@ -1178,8 +1336,12 @@ JAVAHL_CHECK_TARGET=check-javahl
else # 1.8
BDB_FLAG=db.h:$(PREFIX)/bdb/include:$(PREFIX)/bdb/lib:db-$(BDB_MAJOR_VER)
SERF_FLAG=--with-serf="$(PREFIX)/serf"
+# serf >= 1.3.0 is built with scons and no longer sets up rpath linker flags,
+# so we have to do that ourselves :(
+SERF_LDFLAG=-Wl,-rpath,$(PREFIX)/serf/lib -Wl,-rpath,$(PREFIX)/bdb/lib
MOD_DAV_SVN=modules/svn-$(WC)/mod_dav_svn.so
MOD_AUTHZ_SVN=modules/svn-$(WC)/mod_authz_svn.so
+MOD_DONTDOTHAT=modules/svn-$(WC)/mod_dontdothat.so
LIBMAGIC_FLAG=--with-libmagic=$(PREFIX)/libmagic
JAVAHL_CHECK_TARGET=check-all-javahl
endif
@@ -1192,21 +1354,29 @@ else
endif
ifdef PROFILE
-SVN_ALL_STATIC=--enable-all-static
+SVN_STATIC_FLAG=--enable-all-static
else
+SVN_STATIC_FLAG=--disable-static
SVN_WITH_HTTPD=--with-apxs="$(PREFIX)/httpd/bin/apxs" \
--with-apache-libexecdir="$(PREFIX)/httpd/modules/svn-$(WC)"
SVN_WITH_SASL=--with-sasl="$(PREFIX)/cyrus-sasl"
endif
-# configure svn
-$(SVN_OBJDIR)/.configured: $(SVN_OBJDIR)/.retrieved $(DISTDIR)/$(JUNIT_DIST)
+$(SVN_OBJDIR)/.configured: $(SVN_OBJDIR)/.retrieved $(DISTDIR)/$(JUNIT_DIST) \
+ $(APR_OBJDIR)/.installed $(APR_UTIL_OBJDIR)/.installed \
+ $(BDB_OBJDIR)/.installed $(SQLITE_OBJDIR)/.installed \
+ $(HTTPD_OBJDIR)/.installed $(CYRUS_SASL_OBJDIR)/.installed \
+ $(LIBMAGIC_OBJDIR)/.installed $(NEON_OBJDIR)/.installed \
+ $(SERF_OBJDIR)/.installed $(SERF_OLD_OBJDIR)/.installed \
+ $(RUBY_OBJDIR)/.installed $(PYTHON_OBJDIR)/.installed
cd $(SVN_SRCDIR) && ./autogen.sh
cd $(svn_builddir) && \
- env LDFLAGS="-L$(PREFIX)/neon/lib -L$(PREFIX)/apr/lib" \
+ env LDFLAGS="-L$(PREFIX)/neon/lib -L$(PREFIX)/apr/lib $(SERF_LDFLAG) -L$(PREFIX)/gettext/lib -L$(PREFIX)/iconv/lib" \
+ CFLAGS="-I$(PREFIX)/gettext/include -DAPR_POOL_DEBUG" \
+ CXXFLAGS="-I$(PREFIX)/gettext/include -DAPR_POOL_DEBUG" \
LD_LIBRARY_PATH="$(LD_LIBRARY_PATH):$$LD_LIBRARY_PATH" \
GREP="`which grep`" \
- PATH=$(PREFIX)/ruby/bin:$(PREFIX)/python/bin:$$PATH \
+ PATH=$(PREFIX)/ruby/bin:$(PREFIX)/python/bin:$(PREFIX)/gettext/bin:$$PATH \
$(SVN_SRCDIR)/configure \
--enable-maintainer-mode \
--with-ssl \
@@ -1223,20 +1393,20 @@ $(SVN_OBJDIR)/.configured: $(SVN_OBJDIR)/.retrieved $(DISTDIR)/$(JUNIT_DIST)
--disable-mod-activation \
$(JAVAHL_FLAG) \
$(LIBMAGIC_FLAG) \
- $(SVN_ALL_STATIC) \
+ $(SVN_STATIC_FLAG) \
$(DISABLE_NEON_VERSION_CHECK)
touch $@
# compile svn
$(SVN_OBJDIR)/.compiled: $(SVN_OBJDIR)/.configured
cd $(svn_builddir) \
- && make EXTRA_CFLAGS="$(PROFILE_CFLAGS) $(W_NO_SYSTEM_HEADERS)"
+ && env MAKEFLAGS= make EXTRA_CFLAGS="$(PROFILE_CFLAGS) $(W_NO_SYSTEM_HEADERS)"
touch $@
# install svn
$(SVN_OBJDIR)/.installed: $(SVN_OBJDIR)/.compiled
cd $(svn_builddir) \
- && make install
+ && env MAKEFLAGS= make install install-tools
touch $@
# SWIG 1.x and 2.x are not compatible. If SWIG 2.x is used to generated .swg
@@ -1246,44 +1416,44 @@ $(SVN_OBJDIR)/.installed: $(SVN_OBJDIR)/.compiled
# by the same version of SWIG.
$(SVN_OBJDIR)/.pre-generated-swig-cleaned:
-cd $(svn_builddir) \
- && make extraclean-swig
+ && env MAKEFLAGS= make clean-swig
touch $@
$(SVN_OBJDIR)/.bindings-compiled: $(SVN_OBJDIR)/.installed $(SVN_OBJDIR)/.pre-generated-swig-cleaned
cd $(svn_builddir) \
&& env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
- make swig-py
+ env MAKEFLAGS= make swig-py
cd $(svn_builddir) && \
env PATH=$(PREFIX)/ruby/bin:$$PATH \
- LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) make swig-rb
+ LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) env MAKEFLAGS= make swig-rb
if [ $(ENABLE_PERL_BINDINGS) = yes ]; then \
cd $(svn_builddir) \
&& env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
- make swig-pl; \
+ env MAKEFLAGS= make swig-pl; \
fi
if [ $(ENABLE_JAVA_BINDINGS) = yes ]; then \
cd $(svn_builddir) \
- && make javahl; \
+ && env MAKEFLAGS= make javahl; \
fi
touch $@
$(SVN_OBJDIR)/.bindings-installed: $(SVN_OBJDIR)/.bindings-compiled
cd $(svn_builddir) \
&& env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
- make install-swig-py
+ env MAKEFLAGS= make install-swig-py
cd $(svn_builddir) && \
env PATH=$(PREFIX)/ruby/bin:$$PATH \
- LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) make install-swig-rb
+ LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) env MAKEFLAGS= make install-swig-rb
if [ $(ENABLE_PERL_BINDINGS) = yes ]; then \
cd $(svn_builddir) \
- && make install-swig-pl-lib; \
+ && env MAKEFLAGS= make install-swig-pl-lib; \
cd subversion/bindings/swig/perl/native \
&& perl Makefile.PL PREFIX="$(SVN_PREFIX)" \
- && make install; \
+ && env MAKEFLAGS= make install; \
fi
if [ $(ENABLE_JAVA_BINDINGS) = yes ]; then \
cd $(svn_builddir) \
- && make install-javahl; \
+ && env MAKEFLAGS= make install-javahl; \
fi
touch $@
@@ -1291,6 +1461,13 @@ $(SVN_OBJDIR)/.bindings-installed: $(SVN_OBJDIR)/.bindings-compiled
HTTPD_CHECK_CONF=$(PREFIX)/httpd/conf/httpd-svn-check-$(WC).conf
HTTPD_CHECK_USERS=$(PREFIX)/httpd/conf/httpd-svn-check-users
HTTPD_CHECK_PORT=8081
+MOD_DONTDOTHAT_CONF=$(PREFIX)/httpd/conf/dontdothat
+
+$(MOD_DONTDOTHAT_CONF):
+ mkdir -p $(dir $@)
+ echo > $@.tmp '[recursive-actions]'
+ echo >>$@.tmp '/ = deny'
+ mv -f $@.tmp $@
$(HTTPD_CHECK_USERS):
mkdir -p $(dir $@)
@@ -1298,12 +1475,13 @@ $(HTTPD_CHECK_USERS):
echo >>$@.tmp 'jconstant:xCGl35kV9oWCY'
mv -f $@.tmp $@
-$(HTTPD_CHECK_CONF): $(HTTPD_CHECK_USERS)
+$(HTTPD_CHECK_CONF): $(HTTPD_CHECK_USERS) $(MOD_DONTDOTHAT_CONF)
echo > $@.tmp '# httpd config for make check'
echo >>$@.tmp 'ServerRoot "$(PREFIX)/httpd"'
echo >>$@.tmp 'Listen localhost:$(HTTPD_CHECK_PORT)'
echo >>$@.tmp 'LoadModule dav_svn_module $(MOD_DAV_SVN)'
echo >>$@.tmp 'LoadModule authz_svn_module $(MOD_AUTHZ_SVN)'
+ echo >>$@.tmp 'LoadModule dontdothat_module $(MOD_DONTDOTHAT)'
echo >>$@.tmp 'DocumentRoot "$(PREFIX)/httpd/htdocs"'
echo >>$@.tmp '# These two Locations are used for "make check"'
echo >>$@.tmp '<Directory />'
@@ -1347,15 +1525,38 @@ endif
echo >>$@.tmp ' DAV svn'
echo >>$@.tmp ' SVNParentPath /tmp'
echo >>$@.tmp ' Allow from all'
+ echo >>$@.tmp ' #AuthType Basic'
+ echo >>$@.tmp ' #AuthName "Subversion Repository"'
+ echo >>$@.tmp ' #AuthUserFile $(HTTPD_CHECK_USERS)'
+ echo >>$@.tmp ' #Require valid-user'
ifeq ($(USE_HTTPV1),yes)
- echo >>$@.tmp ' SVNAdvertiseV2Protocol off'
+ echo >> $@.tmp ' SVNAdvertiseV2Protocol off'
endif
ifeq ($(USE_AUTHZ_SHORT_CIRCUIT),yes)
- echo >>$@.tmp ' SVNPathAuthz short_circuit'
+ echo >> $@.tmp ' SVNPathAuthz short_circuit'
endif
echo >>$@.tmp '</Location>'
+ echo >>$@.tmp '# Location for tests using mod_dontdothat'
+ echo >>$@.tmp '<Location /ddt-test-work/repositories>'
+ echo >> $@.tmp 'DAV svn'
+ echo >> $@.tmp 'SVNParentPath "$(SVN_WC)/subversion/tests/cmdline/svn-test-work/repositories"'
+ echo >> $@.tmp 'AuthzSVNAccessFile "$(SVN_WC)/subversion/tests/cmdline/svn-test-work/authz"'
+ echo >> $@.tmp 'AuthType Basic'
+ echo >> $@.tmp 'AuthName "Subversion Repository"'
+ echo >> $@.tmp 'AuthUserFile $(HTTPD_CHECK_USERS)'
+ echo >> $@.tmp 'Require valid-user'
+ifeq ($(USE_HTTPV1),yes)
+ echo >> $@.tmp ' SVNAdvertiseV2Protocol off'
+endif
+ echo >> $@.tmp 'DontDoThatConfigFile "$(MOD_DONTDOTHAT_CONF)"'
+ echo >> $@.tmp '</Location>'
echo >>$@.tmp 'RedirectMatch permanent ^/svn-test-work/repositories/REDIRECT-PERM-(.*)$$ /svn-test-work/repositories/$$1'
echo >>$@.tmp 'RedirectMatch ^/svn-test-work/repositories/REDIRECT-TEMP-(.*)$$ /svn-test-work/repositories/$$1'
+ echo >>$@.tmp 'Include "conf/$(SVN_REL_WC)*-custom.conf"'
+ echo >> $@.tmp '#SVNInMemoryCacheSize 0'
+ echo >> $@.tmp '#SVNCacheTextDeltas Off'
+ echo >> $@.tmp '#SVNCacheRevProps Off'
mv -f $@.tmp $@
.PHONY: libpath
@@ -1407,7 +1608,7 @@ start-httpd-debug: $(HTTPD_CHECK_CONF)
@sleep 1
gdb $(PREFIX)/httpd/bin/httpd `cat $(PREFIX)/httpd/logs/httpd.pid`
-stop-httpd:
+stop-httpd: $(HTTPD_CHECK_CONF)
$(HTTPD_STOP_CMD)
start-svnserve: $(SVN_OBJDIR)/.compiled
@@ -1421,7 +1622,9 @@ define do_check
echo "Begin test: $(subst svn-check-,,$@) x $$fs"; \
test -d "$(RAMDISK)/tmp" && export TMPDIR="$(RAMDISK)/tmp"; \
env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) $(LIB_PTHREAD_HACK) \
- make check PARALLEL=$(PARALLEL) CLEANUP=$(CLEANUP) $1 FS_TYPE=$$fs; \
+ env MAKEFLAGS= make check PARALLEL=$(PARALLEL) CLEANUP=$(CLEANUP) \
+ EXCLUSIVE_WC_LOCKS=$(EXCLUSIVE_WC_LOCKS) \
+ MEMCACHED_SERVER=$(MEMCACHED_SERVER) $1 FS_TYPE=$$fs; \
for log in tests.log fails.log; do \
test -f $$log && mv -f $$log $$log.$@-$$fs; \
done; \
@@ -1477,14 +1680,14 @@ svn-check-swig-pl:
(cd $(svn_builddir) && \
env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
$(LIB_PTHREAD_HACK) \
- make check-swig-pl 2>&1) | \
+ env MAKEFLAGS= make check-swig-pl 2>&1) | \
tee $(svn_builddir)/tests.log.bindings.pl; \
fi
svn-check-swig-py:
-(cd $(svn_builddir) && \
env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
- make check-swig-py 2>&1) | \
+ env MAKEFLAGS= make check-swig-py 2>&1) | \
tee $(svn_builddir)/tests.log.bindings.py
# We add the svn prefix to PATH here because the ruby tests
@@ -1495,14 +1698,14 @@ svn-check-swig-rb:
LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
PATH=$(SVN_PREFIX)/bin:$$PATH \
$(LIB_PTHREAD_HACK) \
- make check-swig-rb 2>&1) | \
+ env MAKEFLAGS= make check-swig-rb 2>&1) | \
tee $(svn_builddir)/tests.log.bindings.rb
svn-check-javahl:
-if [ $(ENABLE_JAVA_BINDINGS) = yes ]; then \
(cd $(svn_builddir) && \
env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
- make $(JAVAHL_CHECK_TARGET) 2>&1) | \
+ env MAKEFLAGS= make $(JAVAHL_CHECK_TARGET) 2>&1) | \
tee $(svn_builddir)/tests.log.bindings.javahl; \
fi
diff --git a/tools/dev/wc-ng/svn-wc-db-tester.c b/tools/dev/wc-ng/svn-wc-db-tester.c
new file mode 100644
index 0000000..ccdd102
--- /dev/null
+++ b/tools/dev/wc-ng/svn-wc-db-tester.c
@@ -0,0 +1,269 @@
+/* svn-wc-db-tester.c
+ *
+ * This is a crude command line tool that makes it possible to
+ * run the wc-db validation checks directly.
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include "svn_cmdline.h"
+#include "svn_pools.h"
+#include "svn_wc.h"
+#include "svn_utf.h"
+#include "svn_path.h"
+#include "svn_opt.h"
+#include "svn_version.h"
+
+#include "private/svn_wc_private.h"
+#include "private/svn_cmdline_private.h"
+
+#include "../../../subversion/libsvn_wc/wc.h"
+#include "../../../subversion/libsvn_wc/wc_db.h"
+
+#include "svn_private_config.h"
+
+#define OPT_VERSION SVN_OPT_FIRST_LONGOPT_ID
+
+static svn_error_t *
+version(apr_pool_t *pool)
+{
+ return svn_opt_print_help4(NULL, "svn-wc-db-tester", TRUE, FALSE, FALSE,
+ NULL, NULL, NULL, NULL, NULL, NULL, pool);
+}
+
+static void
+usage(apr_pool_t *pool)
+{
+ svn_error_clear(svn_cmdline_fprintf
+ (stderr, pool,
+ _("Type 'svn-wc-db-tester --help' for usage.\n")));
+}
+
+struct verify_baton
+{
+ svn_boolean_t found_err;
+};
+
+static svn_error_t *
+verify_cb(void *baton,
+ const char *wc_abspath,
+ const char *local_relpath,
+ int op_depth,
+ int id,
+ const char *msg,
+ apr_pool_t *scratch_pool)
+{
+ struct verify_baton *vb = baton;
+
+ if (op_depth >= 0)
+ {
+ SVN_ERR(svn_cmdline_printf(scratch_pool, "%s (depth=%d) DBV%04d: %s\n",
+ local_relpath, op_depth, id, msg));
+ }
+ else
+ {
+ SVN_ERR(svn_cmdline_printf(scratch_pool, "%s DBV%04d: %s\n",
+ local_relpath, id, msg));
+ }
+
+ vb->found_err = TRUE;
+ return SVN_NO_ERROR;
+}
+
+static svn_error_t *
+verify_db(int argc, const char *path, apr_pool_t *pool)
+{
+ const char *local_abspath;
+ svn_wc_context_t *wc_ctx;
+ struct verify_baton vb = { FALSE };
+
+ /* Read the parameters */
+ path = svn_dirent_internal_style(path, pool);
+
+ SVN_ERR(svn_dirent_get_absolute(&local_abspath, path, pool));
+
+ SVN_ERR(svn_wc_context_create(&wc_ctx, NULL, pool, pool));
+
+ SVN_ERR(svn_wc__db_verify_db_full(wc_ctx->db, local_abspath,
+ verify_cb, &vb, pool));
+
+ if (vb.found_err)
+ return svn_error_create(SVN_ERR_WC_PATH_UNEXPECTED_STATUS, NULL,
+ _("Found one or more potential wc.db inconsistencies"));
+
+ return SVN_NO_ERROR;
+}
+
+
+static void
+help(const apr_getopt_option_t *options, apr_pool_t *pool)
+{
+ svn_error_clear
+ (svn_cmdline_fprintf
+ (stdout, pool,
+ _("usage: svn-wc-db-tester [OPTIONS] WC_PATH\n\n"
+ " Run verifications on the working copy\n"
+ "\n"
+        "  WC_PATH must be a working copy path; its wc.db is opened\n"
+        "  and checked for consistency.\n"
+ "\n"
+ "Valid options:\n")));
+ while (options->description)
+ {
+ const char *optstr;
+ svn_opt_format_option(&optstr, options, TRUE, pool);
+ svn_error_clear(svn_cmdline_fprintf(stdout, pool, " %s\n", optstr));
+ ++options;
+ }
+}
+
+
+/* Version compatibility check */
+static svn_error_t *
+check_lib_versions(void)
+{
+ static const svn_version_checklist_t checklist[] =
+ {
+ { "svn_subr", svn_subr_version },
+ { "svn_wc", svn_wc_version },
+ { NULL, NULL }
+ };
+ SVN_VERSION_DEFINE(my_version);
+
+ return svn_ver_check_list2(&my_version, checklist, svn_ver_equal);
+}
+
+/*
+ * On success, leave *EXIT_CODE untouched and return SVN_NO_ERROR. On error,
+ * either return an error to be displayed, or set *EXIT_CODE to non-zero and
+ * return SVN_NO_ERROR.
+ */
+static svn_error_t *
+sub_main(int *exit_code, int argc, const char *argv[], apr_pool_t *pool)
+{
+ apr_getopt_t *os;
+ const apr_getopt_option_t options[] =
+ {
+ {"help", 'h', 0, N_("display this help")},
+ {"version", OPT_VERSION, 0,
+ N_("show program version information")},
+ {0, 0, 0, 0}
+ };
+ apr_array_header_t *remaining_argv;
+
+ /* Check library versions */
+ SVN_ERR(check_lib_versions());
+
+#if defined(WIN32) || defined(__CYGWIN__)
+ /* Set the working copy administrative directory name. */
+ if (getenv("SVN_ASP_DOT_NET_HACK"))
+ {
+ SVN_ERR(svn_wc_set_adm_dir("_svn", pool));
+ }
+#endif
+
+ SVN_ERR(svn_cmdline__getopt_init(&os, argc, argv, pool));
+
+ os->interleave = 1;
+ while (1)
+ {
+ int opt;
+ const char *arg;
+ apr_status_t status = apr_getopt_long(os, options, &opt, &arg);
+ if (APR_STATUS_IS_EOF(status))
+ break;
+ if (status != APR_SUCCESS)
+ {
+ usage(pool);
+ *exit_code = EXIT_FAILURE;
+ return SVN_NO_ERROR;
+ }
+
+ switch (opt)
+ {
+ case 'h':
+ help(options, pool);
+ return SVN_NO_ERROR;
+ case OPT_VERSION:
+ SVN_ERR(version(pool));
+ return SVN_NO_ERROR;
+ default:
+ usage(pool);
+ *exit_code = EXIT_FAILURE;
+ return SVN_NO_ERROR;
+ }
+ }
+
+ /* Convert the remaining arguments to UTF-8. */
+ remaining_argv = apr_array_make(pool, 0, sizeof(const char *));
+ while (os->ind < argc)
+ {
+ const char *s;
+
+ SVN_ERR(svn_utf_cstring_to_utf8(&s, os->argv[os->ind++], pool));
+ APR_ARRAY_PUSH(remaining_argv, const char *) = s;
+ }
+
+ if (remaining_argv->nelts != 1)
+ {
+ usage(pool);
+ *exit_code = EXIT_FAILURE;
+ return SVN_NO_ERROR;
+ }
+
+ /* Do the main task */
+ SVN_ERR(verify_db(remaining_argv->nelts,
+ APR_ARRAY_IDX(remaining_argv, 0, const char *),
+ pool));
+
+ return SVN_NO_ERROR;
+}
+
+int
+main(int argc, const char *argv[])
+{
+ apr_pool_t *pool;
+ int exit_code = EXIT_SUCCESS;
+ svn_error_t *err;
+
+ /* Initialize the app. */
+ if (svn_cmdline_init("svn-wc-db-tester", stderr) != EXIT_SUCCESS)
+ return EXIT_FAILURE;
+
+ /* Create our top-level pool. Use a separate mutexless allocator,
+ * given this application is single threaded.
+ */
+ pool = apr_allocator_owner_get(svn_pool_create_allocator(FALSE));
+
+ err = sub_main(&exit_code, argc, argv, pool);
+
+ /* Flush stdout and report if it fails. It would be flushed on exit anyway
+ but this makes sure that output is not silently lost if it fails. */
+ err = svn_error_compose_create(err, svn_cmdline_fflush(stdout));
+
+ if (err)
+ {
+ exit_code = EXIT_FAILURE;
+ svn_cmdline_handle_exit_error(err, NULL, "svn-wc-db-tester: ");
+ }
+
+ svn_pool_destroy(pool);
+ return exit_code;
+}
diff --git a/tools/dev/which-error.py b/tools/dev/which-error.py
index dc6a8f5..46086ac 100755
--- a/tools/dev/which-error.py
+++ b/tools/dev/which-error.py
@@ -23,10 +23,10 @@
# under the License.
# ====================================================================
#
-# $HeadURL: http://svn.apache.org/repos/asf/subversion/branches/1.8.x/tools/dev/which-error.py $
-# $LastChangedDate: 2012-03-30 20:29:32 +0000 (Fri, 30 Mar 2012) $
-# $LastChangedBy: danielsh $
-# $LastChangedRevision: 1307598 $
+# $HeadURL: https://svn.apache.org/repos/asf/subversion/branches/1.9.x/tools/dev/which-error.py $
+# $LastChangedDate: 2013-12-05 00:42:34 +0000 (Thu, 05 Dec 2013) $
+# $LastChangedBy: breser $
+# $LastChangedRevision: 1547977 $
#
import errno
@@ -72,7 +72,11 @@ def get_errors():
## errno values.
errs.update(errno.errorcode)
## APR-defined errors, from apr_errno.h.
- for line in open(os.path.join(os.path.dirname(sys.argv[0]), 'aprerr.txt')):
+ dirname = os.path.dirname(os.path.realpath(__file__))
+ for line in open(os.path.join(dirname, 'aprerr.txt')):
+ # aprerr.txt parsing duplicated in gen_base.py:write_errno_table()
+ if line.startswith('#'):
+ continue
key, _, val = line.split()
errs[int(val)] = key
## Subversion errors, from svn_error_codes.h.
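
Using realpath(__file__) instead of dirname(sys.argv[0]) makes the aprerr.txt
lookup work when the script is invoked through a symlink, or with a bare argv[0]
that carries no directory component. A hypothetical illustration (the paths are
made up):

    # given: /usr/local/bin/which-error.py -> /home/user/svn/tools/dev/which-error.py
    os.path.dirname(sys.argv[0])                 # '/usr/local/bin' -- no aprerr.txt here
    os.path.dirname(os.path.realpath(__file__))  # '/home/user/svn/tools/dev' -- aprerr.txt found
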
diff --git a/tools/dev/x509-parser.c b/tools/dev/x509-parser.c
new file mode 100644
index 0000000..882bf6c
--- /dev/null
+++ b/tools/dev/x509-parser.c
@@ -0,0 +1,178 @@
+/* x509-parser.c -- print human readable info from an X.509 certificate
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include "svn_pools.h"
+#include "svn_cmdline.h"
+#include "svn_string.h"
+#include "svn_dirent_uri.h"
+#include "svn_io.h"
+#include "svn_base64.h"
+#include "svn_x509.h"
+#include "svn_time.h"
+
+#include "svn_private_config.h"
+
+#define PEM_BEGIN_CERT "-----BEGIN CERTIFICATE-----"
+#define PEM_END_CERT "-----END CERTIFICATE-----"
+
+static svn_error_t *
+show_cert(const svn_string_t *der_cert, apr_pool_t *scratch_pool)
+{
+ svn_x509_certinfo_t *certinfo;
+ const apr_array_header_t *hostnames;
+
+ SVN_ERR(svn_x509_parse_cert(&certinfo, der_cert->data, der_cert->len,
+ scratch_pool, scratch_pool));
+
+ SVN_ERR(svn_cmdline_printf(scratch_pool, _("Subject: %s\n"),
+ svn_x509_certinfo_get_subject(certinfo, scratch_pool)));
+ SVN_ERR(svn_cmdline_printf(scratch_pool, _("Valid from: %s\n"),
+ svn_time_to_human_cstring(
+ svn_x509_certinfo_get_valid_from(certinfo),
+ scratch_pool)));
+ SVN_ERR(svn_cmdline_printf(scratch_pool, _("Valid until: %s\n"),
+ svn_time_to_human_cstring(
+ svn_x509_certinfo_get_valid_to(certinfo),
+ scratch_pool)));
+ SVN_ERR(svn_cmdline_printf(scratch_pool, _("Issuer: %s\n"),
+ svn_x509_certinfo_get_issuer(certinfo, scratch_pool)));
+ SVN_ERR(svn_cmdline_printf(scratch_pool, _("Fingerprint: %s\n"),
+ svn_checksum_to_cstring_display(
+ svn_x509_certinfo_get_digest(certinfo),
+ scratch_pool)));
+
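+  /* Hostnames as extracted by the parser (typically the certificate's
+   * subjectAltName entries). */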
+ hostnames = svn_x509_certinfo_get_hostnames(certinfo);
+ if (hostnames && !apr_is_empty_array(hostnames))
+ {
+ int i;
+ svn_stringbuf_t *buf = svn_stringbuf_create_empty(scratch_pool);
+ for (i = 0; i < hostnames->nelts; ++i)
+ {
+ const char *hostname = APR_ARRAY_IDX(hostnames, i, const char*);
+ if (i > 0)
+ svn_stringbuf_appendbytes(buf, ", ", 2);
+ svn_stringbuf_appendbytes(buf, hostname, strlen(hostname));
+ }
+ SVN_ERR(svn_cmdline_printf(scratch_pool, _("Hostnames: %s\n"),
+ buf->data));
+ }
+
+ return SVN_NO_ERROR;
+}
+
+static svn_boolean_t
+is_der_cert(const svn_string_t *raw)
+{
+  /* Really simplistic fingerprinting of a DER certificate.  By definition
+   * it must start with the ASN.1 tag of a constructed (0x20) SEQUENCE
+   * (0x10), i.e. its first byte must be 0x30.  It is somewhat unfortunate
+   * that 0x30 is also the ASCII code for '0', which means this check can
+   * produce false positives. */
+  return raw->data[0] == 0x30 ? TRUE : FALSE;
+}
+
+static svn_error_t *
+get_der_cert_from_stream(const svn_string_t **der_cert, svn_stream_t *in,
+ apr_pool_t *pool)
+{
+ svn_string_t *raw;
+ SVN_ERR(svn_string_from_stream(&raw, in, pool, pool));
+
+ *der_cert = NULL;
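+
+  /* The input may be raw DER, bare base64-encoded DER, or PEM wrapped in
+   * BEGIN/END markers; try each in turn. */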
+
+ /* look for a DER cert */
+ if (is_der_cert(raw))
+ {
+ *der_cert = raw;
+ return SVN_NO_ERROR;
+ }
+ else
+ {
+ const svn_string_t *base64_decoded;
+ const char *start, *end;
+
+ /* Try decoding as base64 without headers */
+ base64_decoded = svn_base64_decode_string(raw, pool);
+ if (base64_decoded && is_der_cert(base64_decoded))
+ {
+ *der_cert = base64_decoded;
+ return SVN_NO_ERROR;
+ }
+
+      /* Try decoding as a PEM with beginning and ending headers. */
+ start = strstr(raw->data, PEM_BEGIN_CERT);
+ end = strstr(raw->data, PEM_END_CERT);
+ if (start && end && end > start)
+ {
+ svn_string_t *encoded;
+
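+          /* sizeof(PEM_BEGIN_CERT) includes the trailing NUL, so the -1
+           * below skips exactly the marker text; end -= 1 then drops the
+           * newline expected just before the END marker. */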
+ start += sizeof(PEM_BEGIN_CERT) - 1;
+ end -= 1;
+ encoded = svn_string_ncreate(start, end - start, pool);
+ base64_decoded = svn_base64_decode_string(encoded, pool);
+ if (is_der_cert(base64_decoded))
+ {
+ *der_cert = base64_decoded;
+ return SVN_NO_ERROR;
+ }
+ }
+ }
+
+ return svn_error_create(SVN_ERR_X509_CERT_INVALID_PEM, NULL,
+ _("Couldn't find certificate in input data"));
+}
+
+int main(int argc, const char *argv[])
+{
+ apr_pool_t *pool = NULL;
+ svn_error_t *err;
+ svn_stream_t *in;
+
+ apr_initialize();
+ atexit(apr_terminate);
+
+ pool = svn_pool_create(NULL);
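+
+  /* No explicit cleanup is needed: apr_terminate(), registered above with
+   * atexit(), destroys this pool along with APR's global pool. */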
+
+ if (argc == 2)
+ {
+ const char *target = svn_dirent_canonicalize(argv[1], pool);
+ err = svn_stream_open_readonly(&in, target, pool, pool);
+ }
+ else if (argc == 1)
+ {
+ err = svn_stream_for_stdin(&in, pool);
+ }
+ else
+    err = svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+                           _("Too many arguments"));
+
+ if (!err)
+ {
+ const svn_string_t *der_cert;
+ err = get_der_cert_from_stream(&der_cert, in, pool);
+ if (!err)
+ err = show_cert(der_cert, pool);
+ }
+
+ if (err)
+ return svn_cmdline_handle_exit_error(err, pool, "x509-parser: ");
+
+ return 0;
+}