summaryrefslogtreecommitdiff
path: root/tools
diff options
context:
space:
mode:
authorLorry Tar Creator <lorry-tar-importer@baserock.org>2015-03-18 13:33:26 +0000
committer <>2015-07-08 14:41:01 +0000
commitbb0ef45f7c46b0ae221b26265ef98a768c33f820 (patch)
tree98bae10dde41c746c51ae97ec4f879e330415aa7 /tools
parent239dfafe71711b2f4c43d7b90a1228d7bdc5195e (diff)
downloadsubversion-tarball-bb0ef45f7c46b0ae221b26265ef98a768c33f820.tar.gz
Imported from /home/lorry/working-area/delta_subversion-tarball/subversion-1.8.13.tar.gz.subversion-1.8.13
Diffstat (limited to 'tools')
-rwxr-xr-xtools/backup/hot-backup.py.in2
-rwxr-xr-xtools/buildbot/slaves/bb-openbsd/svnbuild.sh4
-rwxr-xr-xtools/buildbot/slaves/bb-openbsd/svncheck-bindings.sh2
-rwxr-xr-xtools/buildbot/slaves/bb-openbsd/svncheck.sh6
-rwxr-xr-xtools/buildbot/slaves/bb-openbsd/svnclean.sh3
-rwxr-xr-xtools/buildbot/slaves/centos/svnbuild.sh7
-rwxr-xr-xtools/buildbot/slaves/centos/svnclean.sh2
-rwxr-xr-xtools/buildbot/slaves/i686-debian-sarge1/svnclean.sh2
-rwxr-xr-xtools/buildbot/slaves/svn-x64-macosx-gnu-shared-daily-ra_serf/svnclean.sh2
-rwxr-xr-xtools/buildbot/slaves/svn-x64-macosx-gnu-shared/svnclean.sh2
-rwxr-xr-xtools/buildbot/slaves/ubuntu-x64/svnbuild.sh2
-rwxr-xr-xtools/buildbot/slaves/ubuntu-x64/svnclean.sh2
-rw-r--r--tools/buildbot/slaves/win32-SharpSvn/svntest-bindings.cmd43
-rw-r--r--tools/buildbot/slaves/win32-SharpSvn/svntest-build-bindings.cmd11
-rw-r--r--tools/buildbot/slaves/win32-SharpSvn/svntest-build.cmd6
-rw-r--r--tools/buildbot/slaves/win32-SharpSvn/svntest-cleanup.cmd19
-rw-r--r--tools/buildbot/slaves/win32-SharpSvn/svntest-template.cmd4
-rw-r--r--tools/buildbot/slaves/win32-SharpSvn/svntest-test.cmd30
-rw-r--r--tools/buildbot/slaves/win32-xp-VS2005/config.bat80
-rw-r--r--tools/buildbot/slaves/win32-xp-VS2005/do_all.bat48
-rw-r--r--tools/buildbot/slaves/win32-xp-VS2005/svnbuild.bat112
-rw-r--r--tools/buildbot/slaves/win32-xp-VS2005/svncheck.bat152
-rw-r--r--tools/buildbot/slaves/win32-xp-VS2005/svnclean.bat60
-rw-r--r--tools/buildbot/slaves/win32-xp-VS2005/svnlog.bat60
-rw-r--r--tools/buildbot/slaves/xp-vc60-ia32/svnbuild.bat154
-rw-r--r--tools/buildbot/slaves/xp-vc60-ia32/svncheck.bat102
-rw-r--r--tools/buildbot/slaves/xp-vc60-ia32/svnclean.bat56
-rw-r--r--tools/buildbot/slaves/xp-vc60-ia32/svnlog.bat50
-rw-r--r--tools/client-side/bash_completion68
-rwxr-xr-xtools/client-side/detach.py271
-rwxr-xr-xtools/client-side/mergeinfo-sanitizer.py319
-rw-r--r--tools/client-side/svn-bench/cl.h198
-rw-r--r--tools/client-side/svn-bench/client_errors.h97
-rw-r--r--tools/client-side/svn-bench/help-cmd.c94
-rw-r--r--tools/client-side/svn-bench/notify.c1045
-rw-r--r--tools/client-side/svn-bench/null-export-cmd.c346
-rw-r--r--tools/client-side/svn-bench/null-list-cmd.c169
-rw-r--r--tools/client-side/svn-bench/null-log-cmd.c243
-rw-r--r--tools/client-side/svn-bench/svn-bench.c954
-rw-r--r--tools/client-side/svn-bench/util.c92
-rwxr-xr-xtools/client-side/svn-ssl-fingerprints.sh2
-rwxr-xr-xtools/client-side/svn-viewspec.py2
-rwxr-xr-xtools/client-side/svnmucc/svnmucc-test.py359
-rw-r--r--tools/client-side/svnmucc/svnmucc.c1206
-rw-r--r--tools/dev/aprerr.txt138
-rwxr-xr-xtools/dev/benchmarks/large_dirs/create_bigdir.sh25
-rwxr-xr-xtools/dev/benchmarks/suite1/benchmark.py1561
-rwxr-xr-xtools/dev/benchmarks/suite1/cronjob56
-rwxr-xr-xtools/dev/benchmarks/suite1/generate_charts60
-rwxr-xr-xtools/dev/benchmarks/suite1/run135
-rw-r--r--tools/dev/benchmarks/suite1/run.bat206
-rwxr-xr-xtools/dev/contribulyze.py9
-rw-r--r--tools/dev/fsfs-access-map.c678
-rw-r--r--tools/dev/fsfs-reorg.c3147
-rw-r--r--tools/dev/gcov.patch73
-rw-r--r--tools/dev/gdb-py/README29
-rw-r--r--tools/dev/gdb-py/svndbg/__init__.py0
-rw-r--r--tools/dev/gdb-py/svndbg/printers.py417
-rwxr-xr-xtools/dev/gen-py-errors.py93
-rwxr-xr-xtools/dev/histogram.py54
-rwxr-xr-xtools/dev/merge-graph.py58
-rw-r--r--tools/dev/mergegraph/__init__.py20
-rw-r--r--tools/dev/mergegraph/mergegraph.py313
-rw-r--r--tools/dev/mergegraph/save_as_sh.py137
-rwxr-xr-xtools/dev/po-merge.py10
-rwxr-xr-xtools/dev/remove-trailing-whitespace.sh24
-rwxr-xr-xtools/dev/sbox-ospath.py64
-rw-r--r--tools/dev/svnraisetreeconflict/svnraisetreeconflict.c (renamed from tools/dev/svnraisetreeconflict/main.c)38
-rw-r--r--tools/dev/unix-build/Makefile.svn385
-rw-r--r--tools/dev/unix-build/README62
-rwxr-xr-xtools/dev/wc-format.py2
-rwxr-xr-xtools/dev/which-error.py21
-rw-r--r--tools/dev/windows-build/Makefile13
-rw-r--r--tools/dev/windows-build/README9
-rw-r--r--tools/diff/diff.c9
-rw-r--r--tools/dist/_gnupg.py1035
-rwxr-xr-xtools/dist/backport.pl167
-rwxr-xr-xtools/dist/collect_sigs.py11
-rwxr-xr-xtools/dist/dist.sh55
-rwxr-xr-xtools/dist/getsigs.py101
-rwxr-xr-xtools/dist/make-deps-tarball.sh121
-rwxr-xr-xtools/dist/nightly.sh12
-rw-r--r--tools/dist/rat-excludes4
-rwxr-xr-xtools/dist/release.py525
-rw-r--r--tools/dist/templates/download.ezt13
-rw-r--r--tools/dist/templates/nightly-candidates.ezt5
-rw-r--r--tools/dist/templates/rc-candidates.ezt63
-rw-r--r--tools/dist/templates/rc-news.ezt6
-rw-r--r--tools/dist/templates/stable-candidates.ezt97
-rw-r--r--tools/dist/templates/stable-news.ezt19
-rwxr-xr-xtools/examples/SvnCLBrowse2
-rwxr-xr-xtools/examples/blame.py6
-rwxr-xr-xtools/examples/get-location-segments.py51
-rw-r--r--tools/examples/info.rb13
-rwxr-xr-xtools/examples/svnshell.rb2
-rwxr-xr-xtools/examples/walk-config-auth.py76
-rwxr-xr-xtools/hook-scripts/commit-access-control.pl.in2
-rwxr-xr-xtools/hook-scripts/control-chars.py130
-rw-r--r--tools/hook-scripts/mailer/mailer.conf.example11
-rwxr-xr-xtools/hook-scripts/mailer/mailer.py31
-rwxr-xr-xtools/hook-scripts/mailer/tests/mailer-init.sh3
-rw-r--r--tools/hook-scripts/mailer/tests/mailer-t1.output158
-rwxr-xr-xtools/hook-scripts/persist-ephemeral-txnprops.py70
-rwxr-xr-xtools/hook-scripts/svn2feed.py2
-rwxr-xr-xtools/hook-scripts/svnperms.py2
-rw-r--r--tools/hook-scripts/validate-files.conf.example69
-rwxr-xr-xtools/hook-scripts/validate-files.py159
-rwxr-xr-xtools/server-side/fsfs-reshard.py2
-rw-r--r--tools/server-side/fsfs-stats.c2181
-rw-r--r--tools/server-side/mod_dontdothat/mod_dontdothat.c80
-rwxr-xr-xtools/server-side/svn-backup-dumps.py2
-rw-r--r--tools/server-side/svn-rep-sharing-stats.c17
-rw-r--r--tools/server-side/svnauthz-validate.c76
-rw-r--r--tools/server-side/svnauthz.c771
-rwxr-xr-xtools/server-side/svnpredumpfilter.py319
-rw-r--r--tools/server-side/svnpubsub/README.txt24
-rwxr-xr-xtools/server-side/svnpubsub/commit-hook.py93
-rw-r--r--tools/server-side/svnpubsub/daemonize.py272
-rwxr-xr-xtools/server-side/svnpubsub/irkerbridge.py322
l---------tools/server-side/svnpubsub/rc.d/svnpubsub1
-rwxr-xr-xtools/server-side/svnpubsub/rc.d/svnpubsub.debian62
-rwxr-xr-xtools/server-side/svnpubsub/rc.d/svnpubsub.freebsd37
-rwxr-xr-xtools/server-side/svnpubsub/rc.d/svnpubsub.solaris53
l---------tools/server-side/svnpubsub/rc.d/svnwcsub1
-rwxr-xr-xtools/server-side/svnpubsub/rc.d/svnwcsub.debian65
-rwxr-xr-xtools/server-side/svnpubsub/rc.d/svnwcsub.freebsd39
-rwxr-xr-xtools/server-side/svnpubsub/rc.d/svnwcsub.solaris56
-rw-r--r--tools/server-side/svnpubsub/svnpubsub.tac33
-rw-r--r--tools/server-side/svnpubsub/svnpubsub/__init__.py1
-rw-r--r--tools/server-side/svnpubsub/svnpubsub/client.py230
-rw-r--r--tools/server-side/svnpubsub/svnpubsub/server.py241
-rwxr-xr-xtools/server-side/svnpubsub/svntweet.py237
-rw-r--r--tools/server-side/svnpubsub/svnwcsub.conf.example16
-rwxr-xr-xtools/server-side/svnpubsub/svnwcsub.py546
-rwxr-xr-xtools/server-side/svnpubsub/testserver.py50
-rwxr-xr-xtools/server-side/svnpubsub/watcher.py55
136 files changed, 19619 insertions, 3618 deletions
diff --git a/tools/backup/hot-backup.py.in b/tools/backup/hot-backup.py.in
index 20dd794..87b0bb1 100755
--- a/tools/backup/hot-backup.py.in
+++ b/tools/backup/hot-backup.py.in
@@ -28,7 +28,7 @@
# under the License.
# ====================================================================
-# $HeadURL: http://svn.apache.org/repos/asf/subversion/branches/1.7.x/tools/backup/hot-backup.py.in $
+# $HeadURL: http://svn.apache.org/repos/asf/subversion/branches/1.8.x/tools/backup/hot-backup.py.in $
# $LastChangedDate: 2010-08-20 04:30:52 +0000 (Fri, 20 Aug 2010) $
# $LastChangedBy: cmpilato $
# $LastChangedRevision: 987379 $
diff --git a/tools/buildbot/slaves/bb-openbsd/svnbuild.sh b/tools/buildbot/slaves/bb-openbsd/svnbuild.sh
index 9486acd..42c3260 100755
--- a/tools/buildbot/slaves/bb-openbsd/svnbuild.sh
+++ b/tools/buildbot/slaves/bb-openbsd/svnbuild.sh
@@ -22,5 +22,7 @@
set -e
set -x
+export JAVA_HOME=/usr/local/jdk-1.7.0
+
branch="$(basename $(svn info . | grep ^URL | cut -d' ' -f2))"
-(cd .. && gmake BRANCH="$branch")
+(cd .. && gmake BRANCH="$branch" THREADING="no")
diff --git a/tools/buildbot/slaves/bb-openbsd/svncheck-bindings.sh b/tools/buildbot/slaves/bb-openbsd/svncheck-bindings.sh
index 3184010..7efb796 100755
--- a/tools/buildbot/slaves/bb-openbsd/svncheck-bindings.sh
+++ b/tools/buildbot/slaves/bb-openbsd/svncheck-bindings.sh
@@ -24,7 +24,7 @@ set -x
branch="$(basename $(svn info . | grep ^URL | cut -d' ' -f2))"
export MALLOC_OPTIONS=S
-(cd .. && gmake BRANCH="$branch" svn-check-bindings)
+(cd .. && gmake BRANCH="$branch" THREADING="no" svn-check-bindings)
grep -q "^Result: PASS$" tests.log.bindings.pl || exit 1
grep -q "^OK$" tests.log.bindings.py || exit 1
tail -n 1 tests.log.bindings.rb | grep -q ", 0 failures, 0 errors" || exit 1
diff --git a/tools/buildbot/slaves/bb-openbsd/svncheck.sh b/tools/buildbot/slaves/bb-openbsd/svncheck.sh
index e5d1bca..ef785b1 100755
--- a/tools/buildbot/slaves/bb-openbsd/svncheck.sh
+++ b/tools/buildbot/slaves/bb-openbsd/svncheck.sh
@@ -24,11 +24,11 @@ set -x
branch="$(basename $(svn info . | grep ^URL | cut -d' ' -f2))"
export MALLOC_OPTIONS=S
-(cd .. && gmake BRANCH="$branch" PARALLEL="" \
+(cd .. && gmake BRANCH="$branch" PARALLEL="" THREADING="no" \
svn-check-local \
svn-check-svn \
svn-check-neon \
svn-check-serf)
-grep -q "^FAIL:" tests.log* && exit 1
-grep -q "^XPASS:" tests.log* && exit 1
+grep -q "^FAIL:" tests.log.svn-check* && exit 1
+grep -q "^XPASS:" tests.log.svn-check* && exit 1
exit 0
diff --git a/tools/buildbot/slaves/bb-openbsd/svnclean.sh b/tools/buildbot/slaves/bb-openbsd/svnclean.sh
index 6273790..82dbbbb 100755
--- a/tools/buildbot/slaves/bb-openbsd/svnclean.sh
+++ b/tools/buildbot/slaves/bb-openbsd/svnclean.sh
@@ -24,9 +24,10 @@ set -x
branch="$(basename $(svn info . | grep ^URL | cut -d' ' -f2))"
(test -h ../svn-trunk || ln -s build ../svn-trunk)
-for i in 3 4 5 6 7; do
+for i in 6 7; do
(test -h ../svn-1.${i}.x || ln -s build ../svn-1.${i}.x)
done
svn update ../../unix-build
(test -h ../GNUmakefile || ln -s ../unix-build/Makefile.svn ../GNUmakefile)
(cd .. && gmake BRANCH="$branch" reset clean)
+rm -f tests.log* fails.log*
diff --git a/tools/buildbot/slaves/centos/svnbuild.sh b/tools/buildbot/slaves/centos/svnbuild.sh
index c7d9145..9278aeb 100755
--- a/tools/buildbot/slaves/centos/svnbuild.sh
+++ b/tools/buildbot/slaves/centos/svnbuild.sh
@@ -23,6 +23,7 @@
set -x
export MAKEFLAGS=-j4
+export PYTHON=/usr/local/python25/bin/python
echo "========= autogen.sh"
./autogen.sh || exit $?
@@ -30,15 +31,17 @@ echo "========= autogen.sh"
echo "========= configure"
# --with-junit=/usr/share/java/junit.jar
# --with-jdk=/usr/lib/jvm/java-1.6.0-openjdk-1.6.0.0.x86_64 \
+# --without-berkeley-db \
./configure --enable-javahl --enable-maintainer-mode \
--with-neon=/usr \
+ --with-serf=/usr/local \
--with-apxs=/usr/sbin/apxs \
- --without-berkeley-db \
+ --with-berkeley-db \
--with-apr=/usr \
--with-apr-util=/usr \
--with-jdk=/opt/java/jdk1.6.0_15 \
--with-junit=/home/bt/junit-4.4.jar \
- --with-sqlite=/home/bt/sqlite-3.6.17/sqlite3.c \
+ --with-sqlite=/home/bt/packages/sqlite-amalgamation-dir/sqlite3.c \
|| exit $?
echo "========= make"
diff --git a/tools/buildbot/slaves/centos/svnclean.sh b/tools/buildbot/slaves/centos/svnclean.sh
index 9a5e715..95d4290 100755
--- a/tools/buildbot/slaves/centos/svnclean.sh
+++ b/tools/buildbot/slaves/centos/svnclean.sh
@@ -27,6 +27,6 @@ echo "========= unmount RAM disc"
test -e ../unmount-ramdrive && ../unmount-ramdrive
echo "========= make extraclean"
-test -e Makefile && (make extraclean || exit $?)
+test -e Makefile && { make extraclean || exit $?; }
exit 0
diff --git a/tools/buildbot/slaves/i686-debian-sarge1/svnclean.sh b/tools/buildbot/slaves/i686-debian-sarge1/svnclean.sh
index 9a5e715..95d4290 100755
--- a/tools/buildbot/slaves/i686-debian-sarge1/svnclean.sh
+++ b/tools/buildbot/slaves/i686-debian-sarge1/svnclean.sh
@@ -27,6 +27,6 @@ echo "========= unmount RAM disc"
test -e ../unmount-ramdrive && ../unmount-ramdrive
echo "========= make extraclean"
-test -e Makefile && (make extraclean || exit $?)
+test -e Makefile && { make extraclean || exit $?; }
exit 0
diff --git a/tools/buildbot/slaves/svn-x64-macosx-gnu-shared-daily-ra_serf/svnclean.sh b/tools/buildbot/slaves/svn-x64-macosx-gnu-shared-daily-ra_serf/svnclean.sh
index b5877a5..586e203 100755
--- a/tools/buildbot/slaves/svn-x64-macosx-gnu-shared-daily-ra_serf/svnclean.sh
+++ b/tools/buildbot/slaves/svn-x64-macosx-gnu-shared-daily-ra_serf/svnclean.sh
@@ -23,7 +23,7 @@
# ../unmount_ramd.sh
echo "========= make extraclean"
-test -e Makefile && (make extraclean || exit $?)
+test -e Makefile && { make extraclean || exit $?; }
rm -rf ../build/*
rm -rf .svn
rm -rf .buildbot-sourcedata
diff --git a/tools/buildbot/slaves/svn-x64-macosx-gnu-shared/svnclean.sh b/tools/buildbot/slaves/svn-x64-macosx-gnu-shared/svnclean.sh
index b5877a5..586e203 100755
--- a/tools/buildbot/slaves/svn-x64-macosx-gnu-shared/svnclean.sh
+++ b/tools/buildbot/slaves/svn-x64-macosx-gnu-shared/svnclean.sh
@@ -23,7 +23,7 @@
# ../unmount_ramd.sh
echo "========= make extraclean"
-test -e Makefile && (make extraclean || exit $?)
+test -e Makefile && { make extraclean || exit $?; }
rm -rf ../build/*
rm -rf .svn
rm -rf .buildbot-sourcedata
diff --git a/tools/buildbot/slaves/ubuntu-x64/svnbuild.sh b/tools/buildbot/slaves/ubuntu-x64/svnbuild.sh
index 31edb1a..e8005b9 100755
--- a/tools/buildbot/slaves/ubuntu-x64/svnbuild.sh
+++ b/tools/buildbot/slaves/ubuntu-x64/svnbuild.sh
@@ -30,7 +30,7 @@ echo "========= autogen.sh"
echo "========= configure"
./configure --enable-javahl --enable-maintainer-mode \
--without-berkeley-db \
- --with-jdk=/usr/lib/jvm/java-6-openjdk/ \
+ --with-jdk=/usr/lib/jvm/java-7-openjdk-amd64/ \
--with-junit=/usr/share/java/junit.jar || exit $?
echo "========= make"
diff --git a/tools/buildbot/slaves/ubuntu-x64/svnclean.sh b/tools/buildbot/slaves/ubuntu-x64/svnclean.sh
index 8cee0c4..4f886ac 100755
--- a/tools/buildbot/slaves/ubuntu-x64/svnclean.sh
+++ b/tools/buildbot/slaves/ubuntu-x64/svnclean.sh
@@ -24,6 +24,6 @@ set -x
echo "========= make extraclean"
cd build
-test -e Makefile && (make extraclean || exit $?)
+test -e Makefile && { make extraclean || exit $?; }
exit 0
diff --git a/tools/buildbot/slaves/win32-SharpSvn/svntest-bindings.cmd b/tools/buildbot/slaves/win32-SharpSvn/svntest-bindings.cmd
index f830812..f3c551e 100644
--- a/tools/buildbot/slaves/win32-SharpSvn/svntest-bindings.cmd
+++ b/tools/buildbot/slaves/win32-SharpSvn/svntest-bindings.cmd
@@ -7,9 +7,9 @@ REM regarding copyright ownership. The ASF licenses this file
REM to you under the Apache License, Version 2.0 (the
REM "License"); you may not use this file except in compliance
REM with the License. You may obtain a copy of the License at
-REM
+REM
REM http://www.apache.org/licenses/LICENSE-2.0
-REM
+REM
REM Unless required by applicable law or agreed to in writing,
REM software distributed under the License is distributed on an
REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -32,9 +32,7 @@ IF ERRORLEVEL 1 (
PATH %PATH%;%TESTDIR%\bin
SET result=0
-
-echo python win-tests.py -r -f fsfs --javahl "%TESTDIR%\tests"
-python win-tests.py -r -f fsfs --javahl "%TESTDIR%\tests"
+python win-tests.py -d -f fsfs --javahl "%TESTDIR%\tests"
IF ERRORLEVEL 1 (
echo [python reported error %ERRORLEVEL%]
SET result=1
@@ -44,10 +42,10 @@ IF EXIST "%TESTDIR%\swig" rmdir /s /q "%TESTDIR%\swig"
mkdir "%TESTDIR%\swig\py-release\libsvn"
mkdir "%TESTDIR%\swig\py-release\svn"
-xcopy "release\subversion\bindings\swig\python\*.pyd" "%TESTDIR%\swig\py-release\libsvn\*.pyd"
-xcopy "release\subversion\bindings\swig\python\libsvn_swig_py\*.dll" "%TESTDIR%\swig\py-release\libsvn\*.dll"
-xcopy "subversion\bindings\swig\python\*.py" "%TESTDIR%\swig\py-release\libsvn\*.py"
-xcopy "subversion\bindings\swig\python\svn\*.py" "%TESTDIR%\swig\py-release\svn\*.py"
+xcopy "release\subversion\bindings\swig\python\*.pyd" "%TESTDIR%\swig\py-release\libsvn\*.pyd" > nul:
+xcopy "release\subversion\bindings\swig\python\libsvn_swig_py\*.dll" "%TESTDIR%\swig\py-release\libsvn\*.dll" > nul:
+xcopy "subversion\bindings\swig\python\*.py" "%TESTDIR%\swig\py-release\libsvn\*.py" > nul:
+xcopy "subversion\bindings\swig\python\svn\*.py" "%TESTDIR%\swig\py-release\svn\*.py" > nul:
SET PYTHONPATH=%TESTDIR%\swig\py-release
@@ -57,4 +55,31 @@ IF ERRORLEVEL 1 (
SET result=1
)
+mkdir "%TESTDIR%\swig\pl-release\SVN"
+mkdir "%TESTDIR%\swig\pl-release\auto\SVN"
+xcopy subversion\bindings\swig\perl\native\*.pm "%TESTDIR%\swig\pl-release\SVN" > nul:
+pushd release\subversion\bindings\swig\perl\native
+for %%i in (*.dll) do (
+ set name=%%i
+ mkdir "%TESTDIR%\swig\pl-release\auto\SVN\!name:~0,-4!"
+ xcopy "!name:~0,-4!.*" "%TESTDIR%\swig\pl-release\auto\SVN\!name:~0,-4!" > nul:
+ xcopy /y "_Core.dll" "%TESTDIR%\swig\pl-release\auto\SVN\!name:~0,-4!" > nul:
+)
+popd
+
+svnversion . /1.7.x | find "S" > nul:
+IF ERRORLEVEL 1 (
+ ECHO --- Building 1.7.x: Skipping perl tests ---
+ EXIT /B %result%
+)
+
+SET PERL5LIB=%PERL5LIB%;%TESTDIR%\swig\pl-release;
+pushd subversion\bindings\swig\perl\native
+perl -MExtUtils::Command::MM -e test_harness() t\*.t
+IF ERRORLEVEL 1 (
+ echo [Perl reported error %ERRORLEVEL%]
+ SET result=1
+)
+popd
+
exit /b %result%
diff --git a/tools/buildbot/slaves/win32-SharpSvn/svntest-build-bindings.cmd b/tools/buildbot/slaves/win32-SharpSvn/svntest-build-bindings.cmd
index a7f185f..9ed5879 100644
--- a/tools/buildbot/slaves/win32-SharpSvn/svntest-build-bindings.cmd
+++ b/tools/buildbot/slaves/win32-SharpSvn/svntest-build-bindings.cmd
@@ -7,9 +7,9 @@ REM regarding copyright ownership. The ASF licenses this file
REM to you under the Apache License, Version 2.0 (the
REM "License"); you may not use this file except in compliance
REM with the License. You may obtain a copy of the License at
-REM
+REM
REM http://www.apache.org/licenses/LICENSE-2.0
-REM
+REM
REM Unless required by applicable law or agreed to in writing,
REM software distributed under the License is distributed on an
REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -29,5 +29,8 @@ IF ERRORLEVEL 1 (
EXIT /B 0
)
-msbuild subversion_vcnet.sln /p:Configuration=Release /p:Platform=win32 /t:__JAVAHL__ /t:__SWIG_PYTHON__ /t:__SWIG_PERL__ /t:__JAVAHL_TESTS__
-IF ERRORLEVEL 1 EXIT /B 1
+msbuild subversion_vcnet.sln /p:Configuration=Debug /p:Platform=win32 /t:__JAVAHL__ /t:__JAVAHL_TESTS__
+IF ERRORLEVEL 1 EXIT /B 1
+
+msbuild subversion_vcnet.sln /p:Configuration=Release /p:Platform=win32 /t:__SWIG_PYTHON__ /t:__SWIG_PERL__
+IF ERRORLEVEL 1 EXIT /B 1
diff --git a/tools/buildbot/slaves/win32-SharpSvn/svntest-build.cmd b/tools/buildbot/slaves/win32-SharpSvn/svntest-build.cmd
index 23c7cb9..27ca272 100644
--- a/tools/buildbot/slaves/win32-SharpSvn/svntest-build.cmd
+++ b/tools/buildbot/slaves/win32-SharpSvn/svntest-build.cmd
@@ -7,9 +7,9 @@ REM regarding copyright ownership. The ASF licenses this file
REM to you under the Apache License, Version 2.0 (the
REM "License"); you may not use this file except in compliance
REM with the License. You may obtain a copy of the License at
-REM
+REM
REM http://www.apache.org/licenses/LICENSE-2.0
-REM
+REM
REM Unless required by applicable law or agreed to in writing,
REM software distributed under the License is distributed on an
REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -25,7 +25,7 @@ IF ERRORLEVEL 1 EXIT /B 1
PUSHD ..\deps
-nant gen-dev -D:wc=..\build -D:impBase=../deps/build/win32 %NANTARGS%
+nant gen-dev -D:wc=..\build -D:impBase=../deps/build/win32 -D:botBuild=true %NANTARGS%
IF ERRORLEVEL 1 EXIT /B 1
POPD
diff --git a/tools/buildbot/slaves/win32-SharpSvn/svntest-cleanup.cmd b/tools/buildbot/slaves/win32-SharpSvn/svntest-cleanup.cmd
index 8e4ea01..fc0adb6 100644
--- a/tools/buildbot/slaves/win32-SharpSvn/svntest-cleanup.cmd
+++ b/tools/buildbot/slaves/win32-SharpSvn/svntest-cleanup.cmd
@@ -7,9 +7,9 @@ REM regarding copyright ownership. The ASF licenses this file
REM to you under the Apache License, Version 2.0 (the
REM "License"); you may not use this file except in compliance
REM with the License. You may obtain a copy of the License at
-REM
+REM
REM http://www.apache.org/licenses/LICENSE-2.0
-REM
+REM
REM Unless required by applicable law or agreed to in writing,
REM software distributed under the License is distributed on an
REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -31,12 +31,13 @@ IF NOT EXIST "imports\" (
svn co --username guest --password "" http://sharpsvn.open.collab.net/svn/sharpsvn/trunk/imports imports
)
IF NOT EXIST build\imports.done (
+ svn up imports
copy /y imports\dev-default.build default.build
- nant build %NANTARGS%
+ nant prep-dev %NANTARGS%
IF ERRORLEVEL 1 (
exit /B 1
)
- del release\bin\*svn* release\bin\_*.*
+ del release\bin\*svn* release\bin\_*.* 2>nul:
echo. > build\imports.done
)
@@ -48,15 +49,25 @@ IF NOT ERRORLEVEL 1 (
)
POPD
+
taskkill /im svn.exe /f 2> nul:
+taskkill /im svnlook.exe /f 2> nul:
taskkill /im svnadmin.exe /f 2> nul:
taskkill /im svnserve.exe /f 2> nul:
taskkill /im svnrdump.exe /f 2> nul:
taskkill /im svnsync.exe /f 2> nul:
taskkill /im httpd.exe /f 2> nul:
+taskkill /im fs-test.exe /f 2> nul:
+taskkill /im op-depth-test.exe /f 2> nul:
+taskkill /im java.exe /f 2> nul:
+taskkill /im perl.exe /f 2> nul:
+taskkill /im mspdbsrv.exe /f 2> nul:
IF EXIST "%TESTDIR%\tests\subversion\tests\cmdline\httpd\" (
rmdir /s /q "%TESTDIR%\tests\subversion\tests\cmdline\httpd"
)
+IF EXIST "%TESTDIR%\swig\" (
+ rmdir /s /q "%TESTDIR%\swig"
+)
del "%TESTDIR%\tests\*.log" 2> nul:
diff --git a/tools/buildbot/slaves/win32-SharpSvn/svntest-template.cmd b/tools/buildbot/slaves/win32-SharpSvn/svntest-template.cmd
index 272d437..1034173 100644
--- a/tools/buildbot/slaves/win32-SharpSvn/svntest-template.cmd
+++ b/tools/buildbot/slaves/win32-SharpSvn/svntest-template.cmd
@@ -7,9 +7,9 @@ REM regarding copyright ownership. The ASF licenses this file
REM to you under the Apache License, Version 2.0 (the
REM "License"); you may not use this file except in compliance
REM with the License. You may obtain a copy of the License at
-REM
+REM
REM http://www.apache.org/licenses/LICENSE-2.0
-REM
+REM
REM Unless required by applicable law or agreed to in writing,
REM software distributed under the License is distributed on an
REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
diff --git a/tools/buildbot/slaves/win32-SharpSvn/svntest-test.cmd b/tools/buildbot/slaves/win32-SharpSvn/svntest-test.cmd
index 0ab32f1..522cdae 100644
--- a/tools/buildbot/slaves/win32-SharpSvn/svntest-test.cmd
+++ b/tools/buildbot/slaves/win32-SharpSvn/svntest-test.cmd
@@ -7,9 +7,9 @@ REM regarding copyright ownership. The ASF licenses this file
REM to you under the Apache License, Version 2.0 (the
REM "License"); you may not use this file except in compliance
REM with the License. You may obtain a copy of the License at
-REM
+REM
REM http://www.apache.org/licenses/LICENSE-2.0
-REM
+REM
REM Unless required by applicable law or agreed to in writing,
REM software distributed under the License is distributed on an
REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -51,10 +51,10 @@ IF "%1" == "-r" (
SET SVN=1
SHIFT
) ELSE IF "%1" == "serf" (
- SET SERF=1
+ SET DAV=1
SHIFT
-) ELSE IF "%1" == "neon" (
- SET NEON=1
+) ELSE IF "%1" == "dav" (
+ SET DAV=1
SHIFT
) ELSE (
SET ARGS=!ARGS! -t %1
@@ -63,6 +63,7 @@ IF "%1" == "-r" (
IF NOT "%1" == "" GOTO next
+taskkill /im svnserve.exe httpd.exe /f 2> nul:
IF NOT EXIST "%TESTDIR%\bin" MKDIR "%TESTDIR%\bin"
xcopy /y /i ..\deps\release\bin\*.dll "%TESTDIR%\bin"
@@ -76,22 +77,13 @@ IF "%LOCAL%+%FSFS%" == "1+1" (
)
IF "%SVN%+%FSFS%" == "1+1" (
- taskkill /im svnserve.exe /f 2> nul:
- echo win-tests.py -c %PARALLEL% %MODE% -f fsfs -u svn://localhost %ARGS% "%TESTDIR%\tests"
- win-tests.py -c %PARALLEL% %MODE% -f fsfs -u svn://localhost %ARGS% "%TESTDIR%\tests"
+ echo win-tests.py -c %PARALLEL% %MODE% -f fsfs -u svn://127.0.0.1 %ARGS% "%TESTDIR%\tests"
+ win-tests.py -c %PARALLEL% %MODE% -f fsfs -u svn://127.0.0.1 %ARGS% "%TESTDIR%\tests"
IF ERRORLEVEL 1 EXIT /B 1
)
-IF "%SERF%+%FSFS%" == "1+1" (
- taskkill /im httpd.exe /f 2> nul:
- echo win-tests.py -c %PARALLEL% %MODE% -f fsfs --http-library serf --httpd-dir "%CD%\..\deps\release\httpd" --httpd-port %TESTPORT% -u http://localhost:%TESTPORT% %ARGS% "%TESTDIR%\tests"
- win-tests.py -c %PARALLEL% %MODE% -f fsfs --http-library serf --httpd-dir "%CD%\..\deps\release\httpd" --httpd-port %TESTPORT% -u http://localhost:%TESTPORT% %ARGS% "%TESTDIR%\tests"
- IF ERRORLEVEL 1 EXIT /B 1
-)
-
-IF "%NEON%+%FSFS%" == "1+1" (
- taskkill /im httpd.exe /f 2> nul:
- echo win-tests.py -c %PARALLEL% %MODE% -f fsfs --http-library neon --httpd-dir "%CD%\..\deps\release\httpd" --httpd-port %TESTPORT% -u http://localhost:%TESTPORT% %ARGS% "%TESTDIR%\tests"
- win-tests.py -c %PARALLEL% %MODE% -f fsfs --http-library neon --httpd-dir "%CD%\..\deps\release\httpd" --httpd-port %TESTPORT% -u http://localhost:%TESTPORT% %ARGS% "%TESTDIR%\tests"
+IF "%DAV%+%FSFS%" == "1+1" (
+ echo win-tests.py -c %PARALLEL% %MODE% -f fsfs --httpd-dir "%CD%\..\deps\release\httpd" --httpd-port %TESTPORT% -u http://127.0.0.1:%TESTPORT% %ARGS% "%TESTDIR%\tests"
+ win-tests.py -c %PARALLEL% %MODE% -f fsfs --httpd-dir "%CD%\..\deps\release\httpd" --httpd-port %TESTPORT% -u http://127.0.0.1:%TESTPORT% %ARGS% "%TESTDIR%\tests"
IF ERRORLEVEL 1 EXIT /B 1
)
diff --git a/tools/buildbot/slaves/win32-xp-VS2005/config.bat b/tools/buildbot/slaves/win32-xp-VS2005/config.bat
index 19e4cf0..2178f93 100644
--- a/tools/buildbot/slaves/win32-xp-VS2005/config.bat
+++ b/tools/buildbot/slaves/win32-xp-VS2005/config.bat
@@ -1,40 +1,40 @@
-REM Licensed to the Apache Software Foundation (ASF) under one
-REM or more contributor license agreements. See the NOTICE file
-REM distributed with this work for additional information
-REM regarding copyright ownership. The ASF licenses this file
-REM to you under the Apache License, Version 2.0 (the
-REM "License"); you may not use this file except in compliance
-REM with the License. You may obtain a copy of the License at
-REM
-REM http://www.apache.org/licenses/LICENSE-2.0
-REM
-REM Unless required by applicable law or agreed to in writing,
-REM software distributed under the License is distributed on an
-REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-REM KIND, either express or implied. See the License for the
-REM specific language governing permissions and limitations
-REM under the License.
-
-@echo off
-set HTTPD_BIN_DIR=C:\Apache2
-set GETTEXT_DIR=C:\svn-builder\djh-xp-vse2005\gettext
-set TEST_DIR=M:\svn-auto-test
-
-set HTTPD_SRC_DIR=..\httpd
-set BDB_DIR=..\db4-win32
-set NEON_DIR=..\neon
-set ZLIB_DIR=..\zlib
-set OPENSSL_DIR=..\openssl
-set INTL_DIR=..\svn-libintl
-
-REM Uncomment this if you want clean subversion build, after testing
-REM set CLEAN_SVN=1
-
-REM Uncomment this if you want disable ra_svn tests
-REM set NO_RA_SVN=1
-
-REM Uncomment this if you want disable ra_dav tests
-REM set NO_RA_HTTP=1
-
-set PATH=%GETTEXT_DIR%\bin;%PATH%
-call C:\VCX2005\VC\vcvarsall.bat x86
+REM Licensed to the Apache Software Foundation (ASF) under one
+REM or more contributor license agreements. See the NOTICE file
+REM distributed with this work for additional information
+REM regarding copyright ownership. The ASF licenses this file
+REM to you under the Apache License, Version 2.0 (the
+REM "License"); you may not use this file except in compliance
+REM with the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing,
+REM software distributed under the License is distributed on an
+REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+REM KIND, either express or implied. See the License for the
+REM specific language governing permissions and limitations
+REM under the License.
+
+@echo off
+set HTTPD_BIN_DIR=C:\Apache2
+set GETTEXT_DIR=C:\svn-builder\djh-xp-vse2005\gettext
+set TEST_DIR=M:\svn-auto-test
+
+set HTTPD_SRC_DIR=..\httpd
+set BDB_DIR=..\db4-win32
+set NEON_DIR=..\neon
+set ZLIB_DIR=..\zlib
+set OPENSSL_DIR=..\openssl
+set INTL_DIR=..\svn-libintl
+
+REM Uncomment this if you want clean subversion build, after testing
+REM set CLEAN_SVN=1
+
+REM Uncomment this if you want disable ra_svn tests
+REM set NO_RA_SVN=1
+
+REM Uncomment this if you want disable ra_dav tests
+REM set NO_RA_HTTP=1
+
+set PATH=%GETTEXT_DIR%\bin;%PATH%
+call C:\VCX2005\VC\vcvarsall.bat x86
diff --git a/tools/buildbot/slaves/win32-xp-VS2005/do_all.bat b/tools/buildbot/slaves/win32-xp-VS2005/do_all.bat
index 5adbe5b..6848f9d 100644
--- a/tools/buildbot/slaves/win32-xp-VS2005/do_all.bat
+++ b/tools/buildbot/slaves/win32-xp-VS2005/do_all.bat
@@ -1,24 +1,24 @@
-REM Licensed to the Apache Software Foundation (ASF) under one
-REM or more contributor license agreements. See the NOTICE file
-REM distributed with this work for additional information
-REM regarding copyright ownership. The ASF licenses this file
-REM to you under the Apache License, Version 2.0 (the
-REM "License"); you may not use this file except in compliance
-REM with the License. You may obtain a copy of the License at
-REM
-REM http://www.apache.org/licenses/LICENSE-2.0
-REM
-REM Unless required by applicable law or agreed to in writing,
-REM software distributed under the License is distributed on an
-REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-REM KIND, either express or implied. See the License for the
-REM specific language governing permissions and limitations
-REM under the License.
-
-cmd.exe /c ..\svnbuild.bat > build.log
-cmd.exe /c ..\svncheck.bat fsfs ra_local > fsfs_local.log
-cmd.exe /c ..\svncheck.bat fsfs ra_svn > fsfs_svn.log
-cmd.exe /c ..\svncheck.bat fsfs ra_dav > fsfs_dav.log
-cmd.exe /c ..\svncheck.bat bdb ra_local > bdb_local.log
-cmd.exe /c ..\svncheck.bat bdb ra_svn > bdb_svn.log
-cmd.exe /c ..\svncheck.bat bdb ra_dav > bdb_dav.log
+REM Licensed to the Apache Software Foundation (ASF) under one
+REM or more contributor license agreements. See the NOTICE file
+REM distributed with this work for additional information
+REM regarding copyright ownership. The ASF licenses this file
+REM to you under the Apache License, Version 2.0 (the
+REM "License"); you may not use this file except in compliance
+REM with the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing,
+REM software distributed under the License is distributed on an
+REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+REM KIND, either express or implied. See the License for the
+REM specific language governing permissions and limitations
+REM under the License.
+
+cmd.exe /c ..\svnbuild.bat > build.log
+cmd.exe /c ..\svncheck.bat fsfs ra_local > fsfs_local.log
+cmd.exe /c ..\svncheck.bat fsfs ra_svn > fsfs_svn.log
+cmd.exe /c ..\svncheck.bat fsfs ra_dav > fsfs_dav.log
+cmd.exe /c ..\svncheck.bat bdb ra_local > bdb_local.log
+cmd.exe /c ..\svncheck.bat bdb ra_svn > bdb_svn.log
+cmd.exe /c ..\svncheck.bat bdb ra_dav > bdb_dav.log
diff --git a/tools/buildbot/slaves/win32-xp-VS2005/svnbuild.bat b/tools/buildbot/slaves/win32-xp-VS2005/svnbuild.bat
index 40f33fc..3724c85 100644
--- a/tools/buildbot/slaves/win32-xp-VS2005/svnbuild.bat
+++ b/tools/buildbot/slaves/win32-xp-VS2005/svnbuild.bat
@@ -1,56 +1,56 @@
-REM Licensed to the Apache Software Foundation (ASF) under one
-REM or more contributor license agreements. See the NOTICE file
-REM distributed with this work for additional information
-REM regarding copyright ownership. The ASF licenses this file
-REM to you under the Apache License, Version 2.0 (the
-REM "License"); you may not use this file except in compliance
-REM with the License. You may obtain a copy of the License at
-REM
-REM http://www.apache.org/licenses/LICENSE-2.0
-REM
-REM Unless required by applicable law or agreed to in writing,
-REM software distributed under the License is distributed on an
-REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-REM KIND, either express or implied. See the License for the
-REM specific language governing permissions and limitations
-REM under the License.
-
-@echo off
-IF NOT EXIST ..\config.bat GOTO noconfig
-call ..\config.bat
-
-cmd.exe /c call ..\svnclean.bat
-
-set PARAMS=-t vcproj --vsnet-version=2005 --with-berkeley-db=%BDB_DIR% --with-zlib=%ZLIB_DIR% --with-httpd=%HTTPD_SRC_DIR% --with-neon=%NEON_DIR% --with-libintl=%INTL_DIR%
-REM set PARAMS=-t vcproj --vsnet-version=2005 --with-berkeley-db=%BDB_DIR% --with-zlib=%ZLIB_DIR% --with-httpd=%HTTPD_SRC_DIR% --with-neon=%NEON_DIR%
-IF NOT "%OPENSSL_DIR%"=="" set PARAMS=%PARAMS% --with-openssl=%OPENSSL_DIR%
-
-python gen-make.py %PARAMS%
-IF ERRORLEVEL 1 GOTO ERROR
-
-REM MSDEV.COM %HTTPD_SRC_DIR%\apache.dsw /MAKE "BuildBin - Win32 Release"
-REM IF ERRORLEVEL 1 GOTO ERROR
-
-rem MSBUILD subversion_vcnet.sln /t:__ALL_TESTS__ /p:Configuration=Debug
-MSBUILD subversion_vcnet.sln /t:__ALL_TESTS__ /p:Configuration=Release
-IF ERRORLEVEL 1 GOTO ERROR
-MSBUILD subversion_vcnet.sln /t:__SWIG_PYTHON__ /p:Configuration=Release
-IF ERRORLEVEL 1 GOTO ERROR
-MSBUILD subversion_vcnet.sln /t:__SWIG_PERL__ /p:Configuration=Release
-IF ERRORLEVEL 1 GOTO ERROR
-MSBUILD subversion_vcnet.sln /t:__JAVAHL__ /p:Configuration=Release
-IF ERRORLEVEL 1 GOTO ERROR
-
-EXIT 0
-
-REM ----------------------------------------------------
-:ERROR
-ECHO.
-ECHO *** Whoops, something choked.
-ECHO.
-CD ..
-EXIT 1
-
-:noconfig
-echo File config.bat not found. Please copy it from config.bat.tmpl and tweak for you.
-EXIT 2
+REM Licensed to the Apache Software Foundation (ASF) under one
+REM or more contributor license agreements. See the NOTICE file
+REM distributed with this work for additional information
+REM regarding copyright ownership. The ASF licenses this file
+REM to you under the Apache License, Version 2.0 (the
+REM "License"); you may not use this file except in compliance
+REM with the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing,
+REM software distributed under the License is distributed on an
+REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+REM KIND, either express or implied. See the License for the
+REM specific language governing permissions and limitations
+REM under the License.
+
+@echo off
+IF NOT EXIST ..\config.bat GOTO noconfig
+call ..\config.bat
+
+cmd.exe /c call ..\svnclean.bat
+
+set PARAMS=-t vcproj --vsnet-version=2005 --with-berkeley-db=%BDB_DIR% --with-zlib=%ZLIB_DIR% --with-httpd=%HTTPD_SRC_DIR% --with-neon=%NEON_DIR% --with-libintl=%INTL_DIR%
+REM set PARAMS=-t vcproj --vsnet-version=2005 --with-berkeley-db=%BDB_DIR% --with-zlib=%ZLIB_DIR% --with-httpd=%HTTPD_SRC_DIR% --with-neon=%NEON_DIR%
+IF NOT "%OPENSSL_DIR%"=="" set PARAMS=%PARAMS% --with-openssl=%OPENSSL_DIR%
+
+python gen-make.py %PARAMS%
+IF ERRORLEVEL 1 GOTO ERROR
+
+REM MSDEV.COM %HTTPD_SRC_DIR%\apache.dsw /MAKE "BuildBin - Win32 Release"
+REM IF ERRORLEVEL 1 GOTO ERROR
+
+rem MSBUILD subversion_vcnet.sln /t:__ALL_TESTS__ /p:Configuration=Debug
+MSBUILD subversion_vcnet.sln /t:__ALL_TESTS__ /p:Configuration=Release
+IF ERRORLEVEL 1 GOTO ERROR
+MSBUILD subversion_vcnet.sln /t:__SWIG_PYTHON__ /p:Configuration=Release
+IF ERRORLEVEL 1 GOTO ERROR
+MSBUILD subversion_vcnet.sln /t:__SWIG_PERL__ /p:Configuration=Release
+IF ERRORLEVEL 1 GOTO ERROR
+MSBUILD subversion_vcnet.sln /t:__JAVAHL__ /p:Configuration=Release
+IF ERRORLEVEL 1 GOTO ERROR
+
+EXIT 0
+
+REM ----------------------------------------------------
+:ERROR
+ECHO.
+ECHO *** Whoops, something choked.
+ECHO.
+CD ..
+EXIT 1
+
+:noconfig
+echo File config.bat not found. Please copy it from config.bat.tmpl and tweak for you.
+EXIT 2
diff --git a/tools/buildbot/slaves/win32-xp-VS2005/svncheck.bat b/tools/buildbot/slaves/win32-xp-VS2005/svncheck.bat
index 9061449..91a3b62 100644
--- a/tools/buildbot/slaves/win32-xp-VS2005/svncheck.bat
+++ b/tools/buildbot/slaves/win32-xp-VS2005/svncheck.bat
@@ -1,76 +1,76 @@
-REM Licensed to the Apache Software Foundation (ASF) under one
-REM or more contributor license agreements. See the NOTICE file
-REM distributed with this work for additional information
-REM regarding copyright ownership. The ASF licenses this file
-REM to you under the Apache License, Version 2.0 (the
-REM "License"); you may not use this file except in compliance
-REM with the License. You may obtain a copy of the License at
-REM
-REM http://www.apache.org/licenses/LICENSE-2.0
-REM
-REM Unless required by applicable law or agreed to in writing,
-REM software distributed under the License is distributed on an
-REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-REM KIND, either express or implied. See the License for the
-REM specific language governing permissions and limitations
-REM under the License.
-
-@echo off
-IF NOT EXIST ..\config.bat GOTO noconfig
-call ..\config.bat
-
-set FS_TYPE=%1
-set RA_TYPE=%2
-
-REM By default, return zero
-set ERR=0
-
-if "%RA_TYPE%"=="ra_local" goto ra_local
-if "%RA_TYPE%"=="ra_svn" goto ra_svn
-if "%RA_TYPE%"=="ra_dav" goto ra_dav
-
-echo Unknown ra method '%RA_TYPE%'
-EXIT 3
-
-:ra_local
-time /T
-python win-tests.py %TEST_DIR%\%FS_TYPE% -f %FS_TYPE% -c -r
-if ERRORLEVEL 1 set ERR=1
-time /T
-echo.
-echo.
-echo Detailed log for %FS_TYPE%\tests.log:
-type %TEST_DIR%\%FS_TYPE%\tests.log
-echo End of log for %FS_TYPE%\tests.log
-echo.
-EXIT %ERR%
-
-:ra_svn
-time /T
-python win-tests.py %TEST_DIR%\%FS_TYPE% -f %FS_TYPE% -c -r -u svn://localhost
-if ERRORLEVEL 1 set ERR=1
-time /T
-echo.
-echo.
-echo Detailed log for %FS_TYPE%\svn-tests.log:
-type %TEST_DIR%\%FS_TYPE%\svn-tests.log
-echo End of log for %FS_TYPE%\svn-tests.log
-echo.
-EXIT %ERR%
-
-:ra_dav
-time /T
-python win-tests.py %TEST_DIR%\%FS_TYPE% -f %FS_TYPE% -c -r --httpd-dir="%HTTPD_BIN_DIR%" --httpd-port 1234
-if ERRORLEVEL 1 set ERR=1
-time /T
-echo.
-echo.
-echo Detailed log for %FS_TYPE%\dav-tests.log:
-type %TEST_DIR%\%FS_TYPE%\dav-tests.log
-echo End of log for %FS_TYPE%\dav-tests.log
-echo.
-EXIT %ERR%
-
-:noconfig
-echo File config.bat not found. Please copy it from config.bat.tmpl and tweak for you.
-EXIT 2
+REM Licensed to the Apache Software Foundation (ASF) under one
+REM or more contributor license agreements. See the NOTICE file
+REM distributed with this work for additional information
+REM regarding copyright ownership. The ASF licenses this file
+REM to you under the Apache License, Version 2.0 (the
+REM "License"); you may not use this file except in compliance
+REM with the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing,
+REM software distributed under the License is distributed on an
+REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+REM KIND, either express or implied. See the License for the
+REM specific language governing permissions and limitations
+REM under the License.
+
+@echo off
+IF NOT EXIST ..\config.bat GOTO noconfig
+call ..\config.bat
+
+set FS_TYPE=%1
+set RA_TYPE=%2
+
+REM By default, return zero
+set ERR=0
+
+if "%RA_TYPE%"=="ra_local" goto ra_local
+if "%RA_TYPE%"=="ra_svn" goto ra_svn
+if "%RA_TYPE%"=="ra_dav" goto ra_dav
+
+echo Unknown ra method '%RA_TYPE%'
+EXIT 3
+
+:ra_local
+time /T
+python win-tests.py %TEST_DIR%\%FS_TYPE% -f %FS_TYPE% -c -r
+if ERRORLEVEL 1 set ERR=1
+time /T
+echo.
+echo.
+echo Detailed log for %FS_TYPE%\tests.log:
+type %TEST_DIR%\%FS_TYPE%\tests.log
+echo End of log for %FS_TYPE%\tests.log
+echo.
+EXIT %ERR%
+
+:ra_svn
+time /T
+python win-tests.py %TEST_DIR%\%FS_TYPE% -f %FS_TYPE% -c -r -u svn://localhost
+if ERRORLEVEL 1 set ERR=1
+time /T
+echo.
+echo.
+echo Detailed log for %FS_TYPE%\svn-tests.log:
+type %TEST_DIR%\%FS_TYPE%\svn-tests.log
+echo End of log for %FS_TYPE%\svn-tests.log
+echo.
+EXIT %ERR%
+
+:ra_dav
+time /T
+python win-tests.py %TEST_DIR%\%FS_TYPE% -f %FS_TYPE% -c -r --httpd-dir="%HTTPD_BIN_DIR%" --httpd-port 1234
+if ERRORLEVEL 1 set ERR=1
+time /T
+echo.
+echo.
+echo Detailed log for %FS_TYPE%\dav-tests.log:
+type %TEST_DIR%\%FS_TYPE%\dav-tests.log
+echo End of log for %FS_TYPE%\dav-tests.log
+echo.
+EXIT %ERR%
+
+:noconfig
+echo File config.bat not found. Please copy it from config.bat.tmpl and tweak for you.
+EXIT 2
diff --git a/tools/buildbot/slaves/win32-xp-VS2005/svnclean.bat b/tools/buildbot/slaves/win32-xp-VS2005/svnclean.bat
index cc9e626..9c05b25 100644
--- a/tools/buildbot/slaves/win32-xp-VS2005/svnclean.bat
+++ b/tools/buildbot/slaves/win32-xp-VS2005/svnclean.bat
@@ -1,30 +1,30 @@
-REM Licensed to the Apache Software Foundation (ASF) under one
-REM or more contributor license agreements. See the NOTICE file
-REM distributed with this work for additional information
-REM regarding copyright ownership. The ASF licenses this file
-REM to you under the Apache License, Version 2.0 (the
-REM "License"); you may not use this file except in compliance
-REM with the License. You may obtain a copy of the License at
-REM
-REM http://www.apache.org/licenses/LICENSE-2.0
-REM
-REM Unless required by applicable law or agreed to in writing,
-REM software distributed under the License is distributed on an
-REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-REM KIND, either express or implied. See the License for the
-REM specific language governing permissions and limitations
-REM under the License.
-
-@echo off
-IF NOT EXIST ..\config.bat GOTO noconfig
-call ..\config.bat
-
-REM if NOT "%CLEAN_SVN%"=="" MSBUILD subversion_vcnet.sln /t:Clean /p:Configuration=Release
-rmdir /s /q Release
-rmdir /s /q %TEST_DIR%
-
-EXIT 0
-
-:noconfig
-echo File config.bat not found. Please copy it from config.bat.tmpl and tweak for you.
-EXIT 2
+REM Licensed to the Apache Software Foundation (ASF) under one
+REM or more contributor license agreements. See the NOTICE file
+REM distributed with this work for additional information
+REM regarding copyright ownership. The ASF licenses this file
+REM to you under the Apache License, Version 2.0 (the
+REM "License"); you may not use this file except in compliance
+REM with the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing,
+REM software distributed under the License is distributed on an
+REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+REM KIND, either express or implied. See the License for the
+REM specific language governing permissions and limitations
+REM under the License.
+
+@echo off
+IF NOT EXIST ..\config.bat GOTO noconfig
+call ..\config.bat
+
+REM if NOT "%CLEAN_SVN%"=="" MSBUILD subversion_vcnet.sln /t:Clean /p:Configuration=Release
+rmdir /s /q Release
+rmdir /s /q %TEST_DIR%
+
+EXIT 0
+
+:noconfig
+echo File config.bat not found. Please copy it from config.bat.tmpl and tweak for you.
+EXIT 2
diff --git a/tools/buildbot/slaves/win32-xp-VS2005/svnlog.bat b/tools/buildbot/slaves/win32-xp-VS2005/svnlog.bat
index 8aa0501..df6760a 100644
--- a/tools/buildbot/slaves/win32-xp-VS2005/svnlog.bat
+++ b/tools/buildbot/slaves/win32-xp-VS2005/svnlog.bat
@@ -1,30 +1,30 @@
-REM Licensed to the Apache Software Foundation (ASF) under one
-REM or more contributor license agreements. See the NOTICE file
-REM distributed with this work for additional information
-REM regarding copyright ownership. The ASF licenses this file
-REM to you under the Apache License, Version 2.0 (the
-REM "License"); you may not use this file except in compliance
-REM with the License. You may obtain a copy of the License at
-REM
-REM http://www.apache.org/licenses/LICENSE-2.0
-REM
-REM Unless required by applicable law or agreed to in writing,
-REM software distributed under the License is distributed on an
-REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-REM KIND, either express or implied. See the License for the
-REM specific language governing permissions and limitations
-REM under the License.
-
-@echo off
-REM IF NOT EXIST ..\config.bat GOTO noconfig
-REM call ..\config.bat
-
-echo.
-echo Detailed test logs included in svncheck.bat log.
-echo.
-
-EXIT 0
-
-:noconfig
-echo File config.bat not found. Please copy it from config.bat.tmpl and tweak for you.
-EXIT 2
+REM Licensed to the Apache Software Foundation (ASF) under one
+REM or more contributor license agreements. See the NOTICE file
+REM distributed with this work for additional information
+REM regarding copyright ownership. The ASF licenses this file
+REM to you under the Apache License, Version 2.0 (the
+REM "License"); you may not use this file except in compliance
+REM with the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing,
+REM software distributed under the License is distributed on an
+REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+REM KIND, either express or implied. See the License for the
+REM specific language governing permissions and limitations
+REM under the License.
+
+@echo off
+REM IF NOT EXIST ..\config.bat GOTO noconfig
+REM call ..\config.bat
+
+echo.
+echo Detailed test logs included in svncheck.bat log.
+echo.
+
+EXIT 0
+
+:noconfig
+echo File config.bat not found. Please copy it from config.bat.tmpl and tweak for you.
+EXIT 2
diff --git a/tools/buildbot/slaves/xp-vc60-ia32/svnbuild.bat b/tools/buildbot/slaves/xp-vc60-ia32/svnbuild.bat
index a8852de..2d5a671 100644
--- a/tools/buildbot/slaves/xp-vc60-ia32/svnbuild.bat
+++ b/tools/buildbot/slaves/xp-vc60-ia32/svnbuild.bat
@@ -1,77 +1,77 @@
-REM Licensed to the Apache Software Foundation (ASF) under one
-REM or more contributor license agreements. See the NOTICE file
-REM distributed with this work for additional information
-REM regarding copyright ownership. The ASF licenses this file
-REM to you under the Apache License, Version 2.0 (the
-REM "License"); you may not use this file except in compliance
-REM with the License. You may obtain a copy of the License at
-REM
-REM http://www.apache.org/licenses/LICENSE-2.0
-REM
-REM Unless required by applicable law or agreed to in writing,
-REM software distributed under the License is distributed on an
-REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-REM KIND, either express or implied. See the License for the
-REM specific language governing permissions and limitations
-REM under the License.
-
-IF NOT EXIST ..\config.bat GOTO noconfig
-call ..\config.bat
-
-IF NOT "%OPENSSL_DIR%"=="" call :openssl
-IF ERRORLEVEL 1 GOTO ERROR
-
-set PARAMS=-t dsp --with-berkeley-db=%BDB_DIR% --with-libintl=%INTL_DIR% --with-zlib=%ZLIB_DIR% --with-httpd=%HTTPD_SRC_DIR% --with-neon=%NEON_DIR% --enable-bdb-in-apr-util
-IF NOT "%OPENSSL_DIR%"=="" set PARAMS=%PARAMS% --with-openssl=%OPENSSL_DIR%
-
-python gen-make.py %PARAMS%
-IF ERRORLEVEL 1 GOTO ERROR
-
-MSDEV.COM %HTTPD_SRC_DIR%\apache.dsw /MAKE "BuildBin - Win32 Release"
-IF ERRORLEVEL 1 GOTO ERROR
-
-MSDEV.COM subversion_msvc.dsw /USEENV /MAKE "__ALL_TESTS__ - Win32 Release"
-IF ERRORLEVEL 1 GOTO ERROR
-
-
-EXIT 0
-
-REM ----------------------------------------------------
-:ERROR
-ECHO.
-ECHO *** Whoops, something choked.
-ECHO.
-CD ..
-EXIT 1
-
-
-:openssl
-rem ====== Build openssl.
-pushd %OPENSSL_DIR%
-perl Configure VC-WIN32
-IF ERRORLEVEL 1 goto openssl-err1
-
-call ms\do_ms
-IF ERRORLEVEL 1 goto openssl-err1
-
-nmake -f ms\ntdll.mak /NOLOGO /S
-IF ERRORLEVEL 1 goto openssl-err1
-
-pushd out32dll
-call ..\ms\test
-IF ERRORLEVEL 1 goto openssl-err2
-
-popd
-popd
-EXIT /B 0
-
-:openssl-err2
-popd
-
-:openssl-err1
-popd
-EXIT 1
-
-:noconfig
-echo File config.bat not found. Please copy it from config.bat.tmpl and tweak for you.
-EXIT 2
+REM Licensed to the Apache Software Foundation (ASF) under one
+REM or more contributor license agreements. See the NOTICE file
+REM distributed with this work for additional information
+REM regarding copyright ownership. The ASF licenses this file
+REM to you under the Apache License, Version 2.0 (the
+REM "License"); you may not use this file except in compliance
+REM with the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing,
+REM software distributed under the License is distributed on an
+REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+REM KIND, either express or implied. See the License for the
+REM specific language governing permissions and limitations
+REM under the License.
+
+IF NOT EXIST ..\config.bat GOTO noconfig
+call ..\config.bat
+
+IF NOT "%OPENSSL_DIR%"=="" call :openssl
+IF ERRORLEVEL 1 GOTO ERROR
+
+set PARAMS=-t dsp --with-berkeley-db=%BDB_DIR% --with-libintl=%INTL_DIR% --with-zlib=%ZLIB_DIR% --with-httpd=%HTTPD_SRC_DIR% --with-neon=%NEON_DIR% --enable-bdb-in-apr-util
+IF NOT "%OPENSSL_DIR%"=="" set PARAMS=%PARAMS% --with-openssl=%OPENSSL_DIR%
+
+python gen-make.py %PARAMS%
+IF ERRORLEVEL 1 GOTO ERROR
+
+MSDEV.COM %HTTPD_SRC_DIR%\apache.dsw /MAKE "BuildBin - Win32 Release"
+IF ERRORLEVEL 1 GOTO ERROR
+
+MSDEV.COM subversion_msvc.dsw /USEENV /MAKE "__ALL_TESTS__ - Win32 Release"
+IF ERRORLEVEL 1 GOTO ERROR
+
+
+EXIT 0
+
+REM ----------------------------------------------------
+:ERROR
+ECHO.
+ECHO *** Whoops, something choked.
+ECHO.
+CD ..
+EXIT 1
+
+
+:openssl
+rem ====== Build openssl.
+pushd %OPENSSL_DIR%
+perl Configure VC-WIN32
+IF ERRORLEVEL 1 goto openssl-err1
+
+call ms\do_ms
+IF ERRORLEVEL 1 goto openssl-err1
+
+nmake -f ms\ntdll.mak /NOLOGO /S
+IF ERRORLEVEL 1 goto openssl-err1
+
+pushd out32dll
+call ..\ms\test
+IF ERRORLEVEL 1 goto openssl-err2
+
+popd
+popd
+EXIT /B 0
+
+:openssl-err2
+popd
+
+:openssl-err1
+popd
+EXIT 1
+
+:noconfig
+echo File config.bat not found. Please copy it from config.bat.tmpl and tweak for you.
+EXIT 2
diff --git a/tools/buildbot/slaves/xp-vc60-ia32/svncheck.bat b/tools/buildbot/slaves/xp-vc60-ia32/svncheck.bat
index 70f102f..70e8768 100644
--- a/tools/buildbot/slaves/xp-vc60-ia32/svncheck.bat
+++ b/tools/buildbot/slaves/xp-vc60-ia32/svncheck.bat
@@ -1,51 +1,51 @@
-REM Licensed to the Apache Software Foundation (ASF) under one
-REM or more contributor license agreements. See the NOTICE file
-REM distributed with this work for additional information
-REM regarding copyright ownership. The ASF licenses this file
-REM to you under the Apache License, Version 2.0 (the
-REM "License"); you may not use this file except in compliance
-REM with the License. You may obtain a copy of the License at
-REM
-REM http://www.apache.org/licenses/LICENSE-2.0
-REM
-REM Unless required by applicable law or agreed to in writing,
-REM software distributed under the License is distributed on an
-REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-REM KIND, either express or implied. See the License for the
-REM specific language governing permissions and limitations
-REM under the License.
-
-IF NOT EXIST ..\config.bat GOTO noconfig
-call ..\config.bat
-
-set FS_TYPE=%1
-set RA_TYPE=%2
-
-REM By default, return zero
-set ERR=0
-
-if "%RA_TYPE%"=="ra_local" goto ra_local
-if "%RA_TYPE%"=="ra_svn" goto ra_svn
-if "%RA_TYPE%"=="ra_dav" goto ra_dav
-
-echo Unknown ra method '%RA_TYPE%'
-EXIT 3
-
-:ra_local
-python win-tests.py %TEST_DIR% -f %FS_TYPE% -c -r
-if ERRORLEVEL 1 set ERR=1
-EXIT %ERR%
-
-:ra_svn
-python win-tests.py %TEST_DIR% -f %FS_TYPE% -c -r -u svn://localhost
-if ERRORLEVEL 1 set ERR=1
-EXIT %ERR%
-
-:ra_dav
-python win-tests.py %TEST_DIR% -f %FS_TYPE% -c -r --httpd-dir="%HTTPD_BIN_DIR%" --httpd-port 1234
-if ERRORLEVEL 1 set ERR=1
-EXIT %ERR%
-
-:noconfig
-echo File config.bat not found. Please copy it from config.bat.tmpl and tweak for you.
-EXIT 2
+REM Licensed to the Apache Software Foundation (ASF) under one
+REM or more contributor license agreements. See the NOTICE file
+REM distributed with this work for additional information
+REM regarding copyright ownership. The ASF licenses this file
+REM to you under the Apache License, Version 2.0 (the
+REM "License"); you may not use this file except in compliance
+REM with the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing,
+REM software distributed under the License is distributed on an
+REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+REM KIND, either express or implied. See the License for the
+REM specific language governing permissions and limitations
+REM under the License.
+
+IF NOT EXIST ..\config.bat GOTO noconfig
+call ..\config.bat
+
+set FS_TYPE=%1
+set RA_TYPE=%2
+
+REM By default, return zero
+set ERR=0
+
+if "%RA_TYPE%"=="ra_local" goto ra_local
+if "%RA_TYPE%"=="ra_svn" goto ra_svn
+if "%RA_TYPE%"=="ra_dav" goto ra_dav
+
+echo Unknown ra method '%RA_TYPE%'
+EXIT 3
+
+:ra_local
+python win-tests.py %TEST_DIR% -f %FS_TYPE% -c -r
+if ERRORLEVEL 1 set ERR=1
+EXIT %ERR%
+
+:ra_svn
+python win-tests.py %TEST_DIR% -f %FS_TYPE% -c -r -u svn://localhost
+if ERRORLEVEL 1 set ERR=1
+EXIT %ERR%
+
+:ra_dav
+python win-tests.py %TEST_DIR% -f %FS_TYPE% -c -r --httpd-dir="%HTTPD_BIN_DIR%" --httpd-port 1234
+if ERRORLEVEL 1 set ERR=1
+EXIT %ERR%
+
+:noconfig
+echo File config.bat not found. Please copy it from config.bat.tmpl and tweak for you.
+EXIT 2
diff --git a/tools/buildbot/slaves/xp-vc60-ia32/svnclean.bat b/tools/buildbot/slaves/xp-vc60-ia32/svnclean.bat
index 071610d..64df9b8 100644
--- a/tools/buildbot/slaves/xp-vc60-ia32/svnclean.bat
+++ b/tools/buildbot/slaves/xp-vc60-ia32/svnclean.bat
@@ -1,28 +1,28 @@
-REM Licensed to the Apache Software Foundation (ASF) under one
-REM or more contributor license agreements. See the NOTICE file
-REM distributed with this work for additional information
-REM regarding copyright ownership. The ASF licenses this file
-REM to you under the Apache License, Version 2.0 (the
-REM "License"); you may not use this file except in compliance
-REM with the License. You may obtain a copy of the License at
-REM
-REM http://www.apache.org/licenses/LICENSE-2.0
-REM
-REM Unless required by applicable law or agreed to in writing,
-REM software distributed under the License is distributed on an
-REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-REM KIND, either express or implied. See the License for the
-REM specific language governing permissions and limitations
-REM under the License.
-
-IF NOT EXIST ..\config.bat GOTO noconfig
-call ..\config.bat
-
-if NOT "%CLEAN_SVN%"=="" MSDEV.COM subversion_msvc.dsw /MAKE "__ALL_TESTS__ - Win32 Release" /CLEAN
-if ERRORLEVEL 1 EXIT 1
-
-EXIT 0
-
-:noconfig
-echo File config.bat not found. Please copy it from config.bat.tmpl and tweak for you.
-EXIT 2
+REM Licensed to the Apache Software Foundation (ASF) under one
+REM or more contributor license agreements. See the NOTICE file
+REM distributed with this work for additional information
+REM regarding copyright ownership. The ASF licenses this file
+REM to you under the Apache License, Version 2.0 (the
+REM "License"); you may not use this file except in compliance
+REM with the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing,
+REM software distributed under the License is distributed on an
+REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+REM KIND, either express or implied. See the License for the
+REM specific language governing permissions and limitations
+REM under the License.
+
+IF NOT EXIST ..\config.bat GOTO noconfig
+call ..\config.bat
+
+if NOT "%CLEAN_SVN%"=="" MSDEV.COM subversion_msvc.dsw /MAKE "__ALL_TESTS__ - Win32 Release" /CLEAN
+if ERRORLEVEL 1 EXIT 1
+
+EXIT 0
+
+:noconfig
+echo File config.bat not found. Please copy it from config.bat.tmpl and tweak for you.
+EXIT 2
diff --git a/tools/buildbot/slaves/xp-vc60-ia32/svnlog.bat b/tools/buildbot/slaves/xp-vc60-ia32/svnlog.bat
index e93a381..bb0d872 100644
--- a/tools/buildbot/slaves/xp-vc60-ia32/svnlog.bat
+++ b/tools/buildbot/slaves/xp-vc60-ia32/svnlog.bat
@@ -1,25 +1,25 @@
-REM Licensed to the Apache Software Foundation (ASF) under one
-REM or more contributor license agreements. See the NOTICE file
-REM distributed with this work for additional information
-REM regarding copyright ownership. The ASF licenses this file
-REM to you under the Apache License, Version 2.0 (the
-REM "License"); you may not use this file except in compliance
-REM with the License. You may obtain a copy of the License at
-REM
-REM http://www.apache.org/licenses/LICENSE-2.0
-REM
-REM Unless required by applicable law or agreed to in writing,
-REM software distributed under the License is distributed on an
-REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-REM KIND, either express or implied. See the License for the
-REM specific language governing permissions and limitations
-REM under the License.
-
-IF NOT EXIST ..\config.bat GOTO noconfig
-call ..\config.bat
-
-EXIT 0
-
-:noconfig
-echo File config.bat not found. Please copy it from config.bat.tmpl and tweak for you.
-EXIT 2
+REM Licensed to the Apache Software Foundation (ASF) under one
+REM or more contributor license agreements. See the NOTICE file
+REM distributed with this work for additional information
+REM regarding copyright ownership. The ASF licenses this file
+REM to you under the Apache License, Version 2.0 (the
+REM "License"); you may not use this file except in compliance
+REM with the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing,
+REM software distributed under the License is distributed on an
+REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+REM KIND, either express or implied. See the License for the
+REM specific language governing permissions and limitations
+REM under the License.
+
+IF NOT EXIST ..\config.bat GOTO noconfig
+call ..\config.bat
+
+EXIT 0
+
+:noconfig
+echo File config.bat not found. Please copy it from config.bat.tmpl and tweak for you.
+EXIT 2
diff --git a/tools/client-side/bash_completion b/tools/client-side/bash_completion
index e45c3f6..eabc15c 100644
--- a/tools/client-side/bash_completion
+++ b/tools/client-side/bash_completion
@@ -479,7 +479,7 @@ _svn()
[[ $previous = '--extensions' || $previous = '-x' ]] && \
values="--unified --ignore-space-change \
- --ignore-all-space --ignore-eol-style"
+ --ignore-all-space --ignore-eol-style --show-c-functions"
[[ $previous = '--depth' ]] && \
values='empty files immediates infinity'
@@ -494,8 +494,8 @@ _svn()
# from svn help resolve
values='base working mine-full theirs-full'
else # checkout merge switch update
- # not implemented yet: mine-conflict theirs-conflict
- values='postpone base mine-full theirs-full edit launch'
+ values="postpone base mine-full theirs-full edit launch \
+ mine-conflict theirs-conflict"
fi
}
@@ -647,14 +647,6 @@ _svn()
[[ ${COMPREPLY} ]] && return 0
fi
- # force mandatory --accept option for 'resolve' command
- if [[ $cmd = 'resolve' && ! $acceptOpt ]]
- then
- COMPREPLY=( $( compgen -W '--accept' -- $cur ) )
- # force option now! others will be available on later completions
- return 0
- fi
-
# maximum number of additional arguments expected in various forms
case $cmd in
merge)
@@ -789,7 +781,7 @@ _svn()
# otherwise build possible options for the command
pOpts="--username --password --no-auth-cache --non-interactive \
- --trust-server-cert"
+ --trust-server-cert --force-interactive"
mOpts="-m --message -F --file --encoding --force-log --with-revprop"
rOpts="-r --revision"
qOpts="-q --quiet"
@@ -826,7 +818,8 @@ _svn()
;;
commit|ci)
cmdOpts="$mOpts $qOpts $nOpts --targets --editor-cmd $pOpts \
- --no-unlock $cOpts --keep-changelists"
+ --no-unlock $cOpts --keep-changelists \
+ --include-externals"
;;
copy|cp)
cmdOpts="$mOpts $rOpts $qOpts --editor-cmd $pOpts --parents \
@@ -840,7 +833,9 @@ _svn()
cmdOpts="$rOpts -x --extensions --diff-cmd --no-diff-deleted \
$nOpts $pOpts --force --old --new --notice-ancestry \
-c --change --summarize $cOpts --xml --git \
- --internal-diff --show-copies-as-adds"
+ --internal-diff --show-copies-as-adds \
+ --ignore-properties --properties-only --no-diff-added \
+ --patch-compatible"
;;
export)
cmdOpts="$rOpts $qOpts $pOpts $nOpts --force --native-eol \
@@ -859,7 +854,7 @@ _svn()
;;
list|ls)
cmdOpts="$rOpts -v --verbose -R --recursive $pOpts \
- --incremental --xml --depth"
+ --incremental --xml --depth --include-externals"
;;
lock)
cmdOpts="-m --message -F --file --encoding --force-log \
@@ -870,13 +865,13 @@ _svn()
--incremental --xml $qOpts -l --limit -c --change \
$gOpts --with-all-revprops --with-revprop --depth \
--diff --diff-cmd -x --extensions --internal-diff \
- --with-no-revprops"
+ --with-no-revprops --search --search-and"
;;
merge)
cmdOpts="$rOpts $nOpts $qOpts --force --dry-run --diff3-cmd \
$pOpts --ignore-ancestry -c --change -x --extensions \
--record-only --accept --reintegrate \
- --allow-mixed-revisions"
+ --allow-mixed-revisions -v --verbose"
;;
mergeinfo)
cmdOpts="$rOpts $pOpts --depth --show-revs -R --recursive"
@@ -886,10 +881,11 @@ _svn()
;;
move|mv|rename|ren)
cmdOpts="$mOpts $rOpts $qOpts --force --editor-cmd $pOpts \
- --parents"
+ --parents --allow-mixed-revisions"
;;
patch)
- cmdOpts="$qOpts $pOpts --dry-run --ignore-whitespace --reverse-diff --strip"
+ cmdOpts="$qOpts $pOpts --dry-run --ignore-whitespace \
+ --reverse-diff --strip"
;;
propdel|pdel|pd)
cmdOpts="$qOpts -R --recursive $rOpts $pOpts $cOpts \
@@ -902,13 +898,13 @@ _svn()
cmdOpts="$cmdOpts --revprop $rOpts"
;;
propget|pget|pg)
- cmdOpts="-v --verbose -R --recursive $rOpts --strict $pOpts $cOpts \
- --depth --xml"
+ cmdOpts="-v --verbose -R --recursive $rOpts --strict \
+ $pOpts $cOpts --depth --xml --show-inherited-props"
[[ $isRevProp || ! $prop ]] && cmdOpts="$cmdOpts --revprop"
;;
proplist|plist|pl)
cmdOpts="-v --verbose -R --recursive $rOpts --revprop $qOpts \
- $pOpts $cOpts --depth --xml"
+ $pOpts $cOpts --depth --xml --show-inherited-props"
;;
propset|pset|ps)
cmdOpts="$qOpts --targets -R --recursive \
@@ -1034,9 +1030,9 @@ _svnadmin ()
cur=${COMP_WORDS[COMP_CWORD]}
# Possible expansions, without pure-prefix abbreviations such as "h".
- cmds='crashtest create deltify dump help hotcopy list-dblogs \
- list-unused-dblogs load lslocks lstxns pack recover rmlocks \
- rmtxns setlog setrevprop setuuid upgrade verify --version'
+ cmds='crashtest create deltify dump freeze help hotcopy list-dblogs \
+ list-unused-dblogs load lock lslocks lstxns pack recover rmlocks \
+ rmtxns setlog setrevprop setuuid unlock upgrade verify --version'
if [[ $COMP_CWORD -eq 1 ]] ; then
COMPREPLY=( $( compgen -W "$cmds" -- $cur ) )
@@ -1045,7 +1041,8 @@ _svnadmin ()
# options that require a parameter
# note: continued lines must end '|' continuing lines must start '|'
- optsParam="-r|--revision|--parent-dir|--fs-type"
+ optsParam="-r|--revision|--parent-dir|--fs-type|-M|--memory-cache-size"
+ optsParam="$optsParam|-F|--file"
# if not typing an option, or if the previous option required a
# parameter, then fallback on ordinary filename expansion
@@ -1060,13 +1057,18 @@ _svnadmin ()
case ${COMP_WORDS[1]} in
create)
cmdOpts="--bdb-txn-nosync --bdb-log-keep --config-dir \
- --fs-type --pre-1.4-compatible --pre-1.5-compatible"
+ --fs-type --pre-1.4-compatible --pre-1.5-compatible \
+ --pre-1.6-compatible --compatible-version"
;;
deltify)
cmdOpts="-r --revision -q --quiet"
;;
dump)
- cmdOpts="-r --revision --incremental -q --quiet --deltas"
+ cmdOpts="-r --revision --incremental -q --quiet --deltas \
+ -M --memory-cache-size"
+ ;;
+ freeze)
+ cmdOpts="-F --file"
;;
help|h|\?)
cmdOpts="$cmds"
@@ -1076,7 +1078,11 @@ _svnadmin ()
;;
load)
cmdOpts="--ignore-uuid --force-uuid --parent-dir -q --quiet \
- --use-pre-commit-hook --use-post-commit-hook"
+ --use-pre-commit-hook --use-post-commit-hook \
+ --bypass-prop-validation -M --memory-cache-size"
+ ;;
+ lock|unlock)
+ cmdOpts="--bypass-hooks"
;;
recover)
cmdOpts="--wait"
@@ -1120,6 +1126,10 @@ _svnadmin ()
--help) cmdOpts=${cmdOpts/ -h / } ;;
-r) cmdOpts=${cmdOpts/ --revision / } ;;
--revision) cmdOpts=${cmdOpts/ -r / } ;;
+ -F) cmdOpts=${cmdOpts/ --file / } ;;
+ --file) cmdOpts=${cmdOpts/ -F / } ;;
+ -M) cmdOpts=${cmdOpts/ --memory-cache-size / } ;;
+            --memory-cache-size) cmdOpts=${cmdOpts/ -M / } ;;
esac
# skip next option if this one requires a parameter
diff --git a/tools/client-side/detach.py b/tools/client-side/detach.py
new file mode 100755
index 0000000..84c725a
--- /dev/null
+++ b/tools/client-side/detach.py
@@ -0,0 +1,271 @@
+#!/usr/bin/env python
+#
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+
+# TODO: if this was part of core subversion, we'd have all sorts of nifty
+# checks, and could use a lot of existing code.
+
+import os
+import re
+import sys
+import shutil
+import sqlite3
+
+
+def usage():
+ print("""usage: %s WC_SRC TARGET
+
+Detach the working copy subdirectory given by WC_SRC to TARGET. This is
+equivalent to copying WC_SRC to TARGET, but it inserts a new set of Subversion
+metadata into TARGET/.svn, making TARGET a proper independent working copy.
+""" % sys.argv[0])
+ sys.exit(1)
+
+
+def find_wcroot(wcdir):
+ wcroot = os.path.abspath(wcdir)
+ old_wcroot = ''
+ while wcroot != old_wcroot:
+ if os.path.exists(os.path.join(wcroot, '.svn', 'wc.db')):
+ return wcroot
+
+ old_wcroot = wcroot
+ wcroot = os.path.dirname(wcroot)
+
+ return None
+
+
+def migrate_sqlite(wc_src, target, wcroot):
+ src_conn = sqlite3.connect(os.path.join(wcroot, '.svn', 'wc.db'))
+ dst_conn = sqlite3.connect(os.path.join(target, '.svn', 'wc.db'))
+
+ local_relsrc = os.path.relpath(wc_src, wcroot)
+
+ src_c = src_conn.cursor()
+ dst_c = dst_conn.cursor()
+
+ # We're only going to attempt this if there are no locks or work queue
+ # items in the source database
+ ### This could probably be tightened up, but for now this suffices
+ src_c.execute('select count(*) from wc_lock')
+ count = int(src_c.fetchone()[0])
+ assert count == 0
+
+ src_c.execute('select count(*) from work_queue')
+ count = int(src_c.fetchone()[0])
+ assert count == 0
+
+ # Copy over the schema
+ src_c.execute('pragma user_version')
+ user_version = src_c.fetchone()[0]
+ # We only know how to handle format 29 working copies
+ assert user_version == 29
+ ### For some reason, sqlite doesn't like to parameterize the pragma statement
+ dst_c.execute('pragma user_version = %d' % user_version)
+
+ src_c.execute('select name, sql from sqlite_master')
+ for row in src_c:
+ if not row[0].startswith('sqlite_'):
+ dst_c.execute(row[1])
+
+ # Insert wcroot row
+ dst_c.execute('insert into wcroot (id, local_abspath) values (?, ?)',
+ (1, None))
+
+ # Copy repositories rows
+ ### Perhaps prune the repositories based upon the new NODES set?
+ src_c.execute('select * from repository')
+ for row in src_c:
+ dst_c.execute('insert into repository values (?, ?, ?)',
+ row)
+
+ # Copy the root node
+ src_c.execute('select * from nodes where local_relpath = ?',
+ (local_relsrc,))
+ row = list(src_c.fetchone())
+ row[1] = ''
+ row[3] = None
+ dst_c.execute('''insert into nodes values
+ (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
+ ?, ?, ?, ?, ?, ?, ?, ?)''', row)
+
+ # Copy children nodes rows
+ src_c.execute('select * from nodes where local_relpath like ?',
+ (local_relsrc + '/%', ))
+ for row in src_c:
+ row = list(row)
+ row[1] = row[1][len(local_relsrc) + 1:]
+ row[3] = row[3][len(local_relsrc) + 1:]
+ dst_c.execute('''insert into nodes values
+ (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
+ ?, ?, ?, ?, ?, ?, ?, ?)''',
+ row)
+
+ # Copy root actual_node
+ src_c.execute('select * from actual_node where local_relpath = ?',
+ (local_relsrc, ))
+ row = src_c.fetchone()
+ if row:
+ row = list(row)
+ row[1] = ''
+ row[2] = None
+ dst_c.execute('''insert into actual_node values
+ (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''', row)
+
+ src_c.execute('select * from actual_node where local_relpath like ?',
+ (local_relsrc + '/%', ))
+ for row in src_c:
+ row = list(row)
+ row[1] = row[1][len(local_relsrc) + 1:]
+ row[2] = row[2][len(local_relsrc) + 1:]
+ dst_c.execute('''insert into actual_node values
+ (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''', row)
+
+ # Hard to know which locks we care about, so just copy 'em all (there aren't
+ # likely to be many)
+ src_c.execute('select * from lock')
+ for row in src_c:
+    dst_c.execute('insert into lock values (?, ?, ?, ?, ?, ?)', row)
+
+ # EXTERNALS
+ src_c.execute('select * from externals where local_relpath = ?',
+ (local_relsrc, ))
+ row = src_c.fetchone()
+ if row:
+ row = list(row)
+ row[1] = ''
+ row[2] = None
+ dst_c.execute('''insert into externals values
+ (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''', row)
+
+ src_c.execute('select * from externals where local_relpath like ?',
+ (local_relsrc + '/%', ))
+ for row in src_c:
+ row = list(row)
+ row[1] = row[1][len(local_relsrc) + 1:]
+ row[2] = row[2][len(local_relsrc) + 1:]
+ dst_c.execute('''insert into externals values
+ (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''', row)
+
+ dst_conn.commit()
+ src_conn.close()
+ dst_conn.close()
+
+
+def migrate_pristines(wc_src, target, wcroot):
+ src_conn = sqlite3.connect(os.path.join(wcroot, '.svn', 'wc.db'))
+ dst_conn = sqlite3.connect(os.path.join(target, '.svn', 'wc.db'))
+
+ src_c = src_conn.cursor()
+ dst_c = dst_conn.cursor()
+
+ regex = re.compile('\$((?:md5 *)|(?:sha1))\$(.*)')
+ src_proot = os.path.join(wcroot, '.svn', 'pristine')
+ target_proot = os.path.join(target, '.svn', 'pristine')
+
+ checksums = {}
+
+ # Grab anything which needs a pristine
+ src_c.execute('''select checksum from nodes
+ union
+ select older_checksum from actual_node
+ union
+ select left_checksum from actual_node
+ union
+ select right_checksum from actual_node''')
+ for row in src_c:
+ if row[0]:
+ match = regex.match(row[0])
+ assert match
+
+ pristine = match.group(2)
+ if pristine in checksums:
+ checksums[pristine] += 1
+ else:
+ checksums[pristine] = 1
+
+ for pristine, count in checksums.items():
+ # Copy the pristines themselves over
+ pdir = os.path.join(target_proot, pristine[0:2])
+ if not os.path.exists(pdir):
+ os.mkdir(pdir)
+ path = os.path.join(pristine[0:2], pristine + '.svn-base')
+ if os.path.exists(os.path.join(target_proot, path)):
+ dst_c.execute
+ else:
+ shutil.copy2(os.path.join(src_proot, path),
+ os.path.join(target_proot, path))
+
+ src_c.execute('select size, md5_checksum from pristine where checksum=?',
+ ('$sha1$' + pristine, ) )
+ (size, md5) = src_c.fetchone()
+
+ # Insert a db row for the pristine
+ dst_c.execute('insert into pristine values (?, NULL, ?, ?, ?)',
+ ('$sha1$' + pristine, size, count, md5))
+
+ dst_conn.commit()
+ src_conn.close()
+ dst_conn.close()
+
+
+def migrate_metadata(wc_src, target, wcroot):
+ # Make paths
+ os.mkdir(os.path.join(target, '.svn'))
+ os.mkdir(os.path.join(target, '.svn', 'tmp'))
+ os.mkdir(os.path.join(target, '.svn', 'pristine'))
+ open(os.path.join(target, '.svn', 'format'), 'w').write('12')
+ open(os.path.join(target, '.svn', 'entries'), 'w').write('12')
+
+ # Two major bits: sqlite data and pristines
+ migrate_sqlite(wc_src, os.path.abspath(target), wcroot)
+ migrate_pristines(wc_src, target, wcroot)
+
+
+def main():
+ if len(sys.argv) < 3:
+ usage()
+
+ wc_src = os.path.normpath(sys.argv[1])
+ if not os.path.isdir(wc_src):
+ print("%s does not exist or is not a directory" % wc_src)
+ sys.exit(1)
+
+ target = os.path.normpath(sys.argv[2])
+ if os.path.exists(target):
+ print("Target '%s' already exists" % target)
+ sys.exit(1)
+
+ wcroot = find_wcroot(wc_src)
+ if not wcroot:
+ print("'%s' is not part of a working copy" % wc_src)
+ sys.exit(1)
+
+ # Use the OS to copy the subdirectory over to the target
+ shutil.copytree(wc_src, target)
+
+  # Now migrate the working copy data
+ migrate_metadata(wc_src, target, wcroot)
+
+
+if __name__ == '__main__':
+ raise Exception("""This script is unfinished and not ready to be used on live data.
+ Trust us.""")
+ main()
diff --git a/tools/client-side/mergeinfo-sanitizer.py b/tools/client-side/mergeinfo-sanitizer.py
new file mode 100755
index 0000000..54d415c
--- /dev/null
+++ b/tools/client-side/mergeinfo-sanitizer.py
@@ -0,0 +1,319 @@
+#!/usr/bin/env python
+#
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+import svn
+import sys
+import os
+import getopt
+import hashlib
+import pickle
+import getpass
+from svn import client, core, ra, wc
+
+## This script first fetches the mergeinfo of the working copy and tries
+## to fetch the location segments for the source paths in the respective
+## revisions present in the mergeinfo. With the obtained location segments
+## result, it creates a new mergeinfo. The depth is infinity by default.
+## This script would stop proceeding if there are any local modifications in the
+## working copy.
+
+try:
+ my_getopt = getopt.gnu_getopt
+except AttributeError:
+ my_getopt = getopt.getopt
+mergeinfo = {}
+
+def usage():
+ sys.stderr.write(""" Usage: %s WCPATH [OPTION]
+
+Analyze the mergeinfo property of the given WCPATH.
+Look for the existence of merge_source's locations at their recorded
+merge ranges. If non-existent merge source is found fix the mergeinfo.
+
+Valid Options:
+ -f [--fix] : set the svn:mergeinfo property. Not committing the changes.
+ -h [--help] : display the usage
+
+""" % os.path.basename(sys.argv[0]) )
+
+
+##
+# This function would 'svn propset' the new mergeinfo to the working copy
+##
+def set_new_mergeinfo(wcpath, newmergeinfo, ctx):
+ client.propset3("svn:mergeinfo", newmergeinfo, wcpath, core.svn_depth_empty,
+ 0, core.SVN_INVALID_REVNUM, None, None, ctx)
+
+
+##
+# Returns the md5 hash of the file
+##
+def md5_of_file(f, block_size = 2**20):
+ md5 = hashlib.md5()
+ while True:
+ data = f.read(block_size)
+ if not data:
+ break
+ md5.update(data)
+ return md5.digest()
+
+
+
+def hasher(hash_file, newmergeinfo_file):
+ new_mergeinfo = core.svn_mergeinfo_to_string(mergeinfo)
+ with open(newmergeinfo_file, "a") as buffer_file:
+ pickle.dump(new_mergeinfo, buffer_file)
+ buffer_file.close()
+
+ with open(newmergeinfo_file, "rb") as buffer_file:
+ hash_of_buffer_file = md5_of_file(buffer_file)
+ buffer_file.close()
+
+ with open(hash_file, "w") as hash_file:
+ pickle.dump(hash_of_buffer_file, hash_file)
+ hash_file.close()
+
+
+def location_segment_callback(segment, pool):
+ if segment.path is not None:
+ source_path = '/' + segment.path
+ path_ranges = mergeinfo.get(source_path, [])
+ range = svn.core.svn_merge_range_t()
+ range.start = segment.range_start - 1
+ range.end = segment.range_end
+ range.inheritable = 1
+ path_ranges.append(range)
+ mergeinfo[source_path] = path_ranges
+
+##
+# This function does the authentication in an interactive way
+##
+def prompt_func_ssl_unknown_cert(realm, failures, cert_info, may_save, pool):
+ print "The certificate details are as follows:"
+ print "--------------------------------------"
+ print "Issuer : " + str(cert_info.issuer_dname)
+ print "Hostname : " + str(cert_info.hostname)
+ print "ValidFrom : " + str(cert_info.valid_from)
+ print "ValidUpto : " + str(cert_info.valid_until)
+ print "Fingerprint: " + str(cert_info.fingerprint)
+ print ""
+ ssl_trust = core.svn_auth_cred_ssl_server_trust_t()
+ if may_save:
+ choice = raw_input( "accept (t)temporarily (p)permanently: ")
+ else:
+ choice = raw_input( "(r)Reject or accept (t)temporarily: ")
+ if choice[0] == "t" or choice[0] == "T":
+ ssl_trust.may_save = False
+ ssl_trust.accepted_failures = failures
+ elif choice[0] == "p" or choice[0] == "P":
+ ssl_trust.may_save = True
+ ssl_trust.accepted_failures = failures
+ else:
+ ssl_trust = None
+ return ssl_trust
+
+def prompt_func_simple_prompt(realm, username, may_save, pool):
+ username = raw_input("username: ")
+ password = getpass.getpass(prompt="password: ")
+ simple_cred = core.svn_auth_cred_simple_t()
+ simple_cred.username = username
+ simple_cred.password = password
+ simple_cred.may_save = False
+ return simple_cred
+
+##
+# This function tries to authenticate(if needed) and fetch the
+# location segments for the available mergeinfo and create a new
+# mergeinfo dictionary
+##
+def get_new_location_segments(parsed_original_mergeinfo, repo_root,
+ wcpath, ctx):
+
+ for path in parsed_original_mergeinfo:
+ full_url = repo_root + path
+ ra_callbacks = ra.callbacks_t()
+ ra_callbacks.auth_baton = core.svn_auth_open([
+ core.svn_auth_get_ssl_server_trust_file_provider(),
+ core.svn_auth_get_simple_prompt_provider(prompt_func_simple_prompt, 2),
+ core.svn_auth_get_ssl_server_trust_prompt_provider(prompt_func_ssl_unknown_cert),
+ svn.client.get_simple_provider(),
+ svn.client.get_username_provider()
+ ])
+ try:
+ ctx.config = core.svn_config_get_config(None)
+ ra_session = ra.open(full_url, ra_callbacks, None, ctx.config)
+
+ for revision_range in parsed_original_mergeinfo[path]:
+ try:
+ ra.get_location_segments(ra_session, "", revision_range.end,
+ revision_range.end, revision_range.start + 1, location_segment_callback)
+ except svn.core.SubversionException:
+ sys.stderr.write(" Could not find location segments for %s \n" % path)
+ except Exception, e:
+ sys.stderr.write("")
+
+
+def sanitize_mergeinfo(parsed_original_mergeinfo, repo_root, wcpath,
+ ctx, hash_file, newmergeinfo_file, temp_pool):
+ full_mergeinfo = {}
+ for entry in parsed_original_mergeinfo:
+ get_new_location_segments(parsed_original_mergeinfo[entry], repo_root, wcpath, ctx)
+ full_mergeinfo.update(parsed_original_mergeinfo[entry])
+
+ hasher(hash_file, newmergeinfo_file)
+ diff_mergeinfo = core.svn_mergeinfo_diff(full_mergeinfo,
+ mergeinfo, 1, temp_pool)
+ #There should be no mergeinfo added by our population. There should only
+ #be deletion of mergeinfo. so take it from diff_mergeinfo[0]
+ print "The bogus mergeinfo summary:"
+ bogus_mergeinfo_deleted = diff_mergeinfo[0]
+ for bogus_mergeinfo_path in bogus_mergeinfo_deleted:
+ sys.stdout.write(bogus_mergeinfo_path + ": ")
+ for revision_range in bogus_mergeinfo_deleted[bogus_mergeinfo_path]:
+ sys.stdout.write(str(revision_range.start + 1) + "-" + str(revision_range.end) + ",")
+ print ""
+
+##
+# This function tries to 'propset' the new mergeinfo into the working copy.
+# It reads the new mergeinfo from the .newmergeinfo file and verifies its
+# hash against the hash in the .hashfile
+##
+def fix_sanitized_mergeinfo(parsed_original_mergeinfo, repo_root, wcpath,
+ ctx, hash_file, newmergeinfo_file, temp_pool):
+ has_local_modification = check_local_modifications(wcpath, temp_pool)
+ old_hash = ''
+ new_hash = ''
+ try:
+ with open(hash_file, "r") as f:
+ old_hash = pickle.load(f)
+ f.close
+ except IOError, e:
+ get_new_location_segments(parsed_original_mergeinfo, repo_root, wcpath, ctx)
+ hasher(hash_file, newmergeinfo_file)
+ try:
+ with open(hash_file, "r") as f:
+ old_hash = pickle.load(f)
+ f.close
+ except IOError:
+ hasher(hash_file, newmergeinfo_file)
+ try:
+ with open(newmergeinfo_file, "r") as f:
+ new_hash = md5_of_file(f)
+ f.close
+ except IOError, e:
+ if not mergeinfo:
+ get_new_location_segments(parsed_original_mergeinfo, repo_root, wcpath, ctx)
+ hasher(hash_file, newmergeinfo_file)
+ with open(newmergeinfo_file, "r") as f:
+ new_hash = md5_of_file(f)
+ f.close
+ if old_hash == new_hash:
+ with open(newmergeinfo_file, "r") as f:
+ newmergeinfo = pickle.load(f)
+ f.close
+ set_new_mergeinfo(wcpath, newmergeinfo, ctx)
+ if os.path.exists(newmergeinfo_file):
+ os.remove(newmergeinfo_file)
+ os.remove(hash_file)
+ else:
+ print "The hashes are not matching. Probable chance of unwanted tweaking in the mergeinfo"
+
+
+##
+# This function checks the working copy for any local modifications
+##
+def check_local_modifications(wcpath, temp_pool):
+ has_local_mod = wc.svn_wc_revision_status(wcpath, None, 0, None, temp_pool)
+ if has_local_mod.modified:
+ print """The working copy has local modifications. Please revert them or clean
+the working copy before running the script."""
+ sys.exit(1)
+
+def get_original_mergeinfo(wcpath, revision, depth, ctx, temp_pool):
+ propget_list = client.svn_client_propget3("svn:mergeinfo", wcpath,
+ revision, revision, depth, None,
+ ctx, temp_pool)
+
+ pathwise_mergeinfo = ""
+ pathwise_mergeinfo_list = []
+ mergeinfo_catalog = propget_list[0]
+ mergeinfo_catalog_dict = {}
+ for entry in mergeinfo_catalog:
+ mergeinfo_catalog_dict[entry] = core.svn_mergeinfo_parse(mergeinfo_catalog[entry], temp_pool)
+ return mergeinfo_catalog_dict
+
+
+def main():
+ try:
+ opts, args = my_getopt(sys.argv[1:], "h?f", ["help", "fix"])
+ except Exception, e:
+ sys.stderr.write(""" Improperly used """)
+ sys.exit(1)
+
+ if len(args) == 1:
+ wcpath = args[0]
+ wcpath = os.path.abspath(wcpath)
+ else:
+ usage()
+ sys.exit(1)
+
+ fix = 0
+ current_path = os.getcwd()
+ hash_file = os.path.join(current_path, ".hashfile")
+ newmergeinfo_file = os.path.join(current_path, ".newmergeinfo")
+
+ temp_pool = core.svn_pool_create()
+ ctx = client.svn_client_create_context(temp_pool)
+ depth = core.svn_depth_infinity
+ revision = core.svn_opt_revision_t()
+ revision.kind = core.svn_opt_revision_unspecified
+
+ for opt, values in opts:
+ if opt == "--help" or opt in ("-h", "-?"):
+ usage()
+ elif opt == "--fix" or opt == "-f":
+ fix = 1
+
+ # Check for any local modifications in the working copy
+ check_local_modifications(wcpath, temp_pool)
+
+ parsed_original_mergeinfo = get_original_mergeinfo(wcpath, revision,
+ depth, ctx, temp_pool)
+
+ repo_root = client.svn_client_root_url_from_path(wcpath, ctx, temp_pool)
+
+ core.svn_config_ensure(None)
+
+ if fix == 0:
+ sanitize_mergeinfo(parsed_original_mergeinfo, repo_root, wcpath, ctx,
+ hash_file, newmergeinfo_file, temp_pool)
+ if fix == 1:
+ fix_sanitized_mergeinfo(parsed_original_mergeinfo, repo_root, wcpath,
+ ctx, hash_file, newmergeinfo_file, temp_pool)
+
+
+if __name__ == "__main__":
+ try:
+ main()
+ except KeyboardInterrupt:
+ print ""
+ sys.stderr.write("The script is interrupted and stopped manually.")
+ print ""
+
diff --git a/tools/client-side/svn-bench/cl.h b/tools/client-side/svn-bench/cl.h
new file mode 100644
index 0000000..7a1e48d
--- /dev/null
+++ b/tools/client-side/svn-bench/cl.h
@@ -0,0 +1,198 @@
+/*
+ * cl.h: shared stuff in the command line program
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+/* ==================================================================== */
+
+
+
+#ifndef SVN_CL_H
+#define SVN_CL_H
+
+/*** Includes. ***/
+
+#include <apr_tables.h>
+
+#include "svn_client.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+/*** Command dispatch. ***/
+
+/* Hold results of option processing that are shared by multiple
+ commands. */
+typedef struct svn_cl__opt_state_t
+{
+ /* An array of svn_opt_revision_range_t *'s representing revisions
+ ranges indicated on the command-line via the -r and -c options.
+ For each range in the list, if only one revision was provided
+ (-rN), its 'end' member remains 'svn_opt_revision_unspecified'.
+ This array always has at least one element, even if that is a
+ null range in which both ends are 'svn_opt_revision_unspecified'. */
+ apr_array_header_t *revision_ranges;
+
+ /* These are simply a copy of the range start and end values present
+ in the first item of the revision_ranges list. */
+ svn_opt_revision_t start_revision;
+ svn_opt_revision_t end_revision;
+
+ /* Flag which is only set if the '-c' option was used. */
+ svn_boolean_t used_change_arg;
+
+ /* Flag which is only set if the '-r' option was used. */
+ svn_boolean_t used_revision_arg;
+
+ /* Max number of log messages to get back from svn_client_log2. */
+ int limit;
+
+ /* After option processing is done, reflects the switch actually
+ given on the command line, or svn_depth_unknown if none. */
+ svn_depth_t depth;
+
+ svn_boolean_t quiet; /* sssh...avoid unnecessary output */
+ svn_boolean_t non_interactive; /* do no interactive prompting */
+ svn_boolean_t version; /* print version information */
+ svn_boolean_t verbose; /* be verbose */
+ svn_boolean_t strict; /* do strictly what was requested */
+ const char *encoding; /* the locale/encoding of the data*/
+ svn_boolean_t help; /* print usage message */
+ const char *auth_username; /* auth username */ /* UTF-8! */
+ const char *auth_password; /* auth password */ /* UTF-8! */
+ const char *extensions; /* subprocess extension args */ /* UTF-8! */
+ apr_array_header_t *targets; /* target list from file */ /* UTF-8! */
+ svn_boolean_t no_auth_cache; /* do not cache authentication information */
+ svn_boolean_t stop_on_copy; /* don't cross copies during processing */
+ const char *config_dir; /* over-riding configuration directory */
+ apr_array_header_t *config_options; /* over-riding configuration options */
+ svn_boolean_t all_revprops; /* retrieve all revprops */
+ svn_boolean_t no_revprops; /* retrieve no revprops */
+ apr_hash_t *revprop_table; /* table of revision properties to get/set */
+ svn_boolean_t use_merge_history; /* use/display extra merge information */
+ svn_boolean_t trust_server_cert; /* trust server SSL certs that would
+ otherwise be rejected as "untrusted" */
+} svn_cl__opt_state_t;
+
+
+typedef struct svn_cl__cmd_baton_t
+{
+ svn_cl__opt_state_t *opt_state;
+ svn_client_ctx_t *ctx;
+} svn_cl__cmd_baton_t;
+
+
+/* Declare all the command procedures */
+svn_opt_subcommand_t
+ svn_cl__help,
+ svn_cl__null_export,
+ svn_cl__null_list,
+ svn_cl__null_log;
+
+
+/* See definition in main.c for documentation. */
+extern const svn_opt_subcommand_desc2_t svn_cl__cmd_table[];
+
+/* See definition in main.c for documentation. */
+extern const int svn_cl__global_options[];
+
+/* See definition in main.c for documentation. */
+extern const apr_getopt_option_t svn_cl__options[];
+
+
+/* A helper for the many subcommands that wish to merely warn when
+ * invoked on an unversioned, nonexistent, or otherwise innocuously
+ * errorful resource. Meant to be wrapped with SVN_ERR().
+ *
+ * If ERR is null, return SVN_NO_ERROR.
+ *
+ * Else if ERR->apr_err is one of the error codes supplied in varargs,
+ * then handle ERR as a warning (unless QUIET is true), clear ERR, and
+ * return SVN_NO_ERROR, and push the value of ERR->apr_err into the
+ * ERRORS_SEEN array, if ERRORS_SEEN is not NULL.
+ *
+ * Else return ERR.
+ *
+ * Typically, error codes like SVN_ERR_UNVERSIONED_RESOURCE,
+ * SVN_ERR_ENTRY_NOT_FOUND, etc, are supplied in varargs. Don't
+ * forget to terminate the argument list with SVN_NO_ERROR.
+ */
+svn_error_t *
+svn_cl__try(svn_error_t *err,
+ apr_array_header_t *errors_seen,
+ svn_boolean_t quiet,
+ ...);
+
+
+/* Our cancellation callback. */
+svn_error_t *
+svn_cl__check_cancel(void *baton);
+
+
+
+/*** Notification functions to display results on the terminal. */
+
+/* Set *NOTIFY_FUNC_P and *NOTIFY_BATON_P to a notifier/baton for all
+ * operations, allocated in POOL.
+ */
+svn_error_t *
+svn_cl__get_notifier(svn_wc_notify_func2_t *notify_func_p,
+ void **notify_baton_p,
+ apr_pool_t *pool);
+
+/* Make the notifier for use with BATON print the appropriate summary
+ * line at the end of the output.
+ */
+svn_error_t *
+svn_cl__notifier_mark_export(void *baton);
+
+/* Like svn_client_args_to_target_array() but, if the only error is that some
+ * arguments are reserved file names, then print warning messages for those
+ * targets, store the rest of the targets in TARGETS_P and return success. */
+svn_error_t *
+svn_cl__args_to_target_array_print_reserved(apr_array_header_t **targets_p,
+ apr_getopt_t *os,
+ const apr_array_header_t *known_targets,
+ svn_client_ctx_t *ctx,
+ svn_boolean_t keep_dest_origpath_on_truepath_collision,
+ apr_pool_t *pool);
+
+/* Return an error if TARGET is a URL; otherwise return SVN_NO_ERROR. */
+svn_error_t *
+svn_cl__check_target_is_local_path(const char *target);
+
+/* Return a copy of PATH, converted to the local path style, skipping
+ * PARENT_PATH if it is non-null and is a parent of or equal to PATH.
+ *
+ * This function assumes PARENT_PATH and PATH are both absolute "dirents"
+ * or both relative "dirents". */
+const char *
+svn_cl__local_style_skip_ancestor(const char *parent_path,
+ const char *path,
+ apr_pool_t *pool);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* SVN_CL_H */
diff --git a/tools/client-side/svn-bench/client_errors.h b/tools/client-side/svn-bench/client_errors.h
new file mode 100644
index 0000000..19f0bdf
--- /dev/null
+++ b/tools/client-side/svn-bench/client_errors.h
@@ -0,0 +1,97 @@
+/*
+ * client_errors.h: error codes this command line client features
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+/* ==================================================================== */
+
+
+
+#ifndef SVN_CLIENT_ERRORS_H
+#define SVN_CLIENT_ERRORS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/*
+ * This error defining system is copied from and explained in
+ * ../../include/svn_error_codes.h
+ */
+
+/* Process this file if we're building an error array, or if we have
+ not defined the enumerated constants yet. */
+#if defined(SVN_ERROR_BUILD_ARRAY) || !defined(SVN_CMDLINE_ERROR_ENUM_DEFINED)
+
+#if defined(SVN_ERROR_BUILD_ARRAY)
+
+#error "Need to update err_defn for r1464679 and un-typo 'CDMLINE'"
+
+#define SVN_ERROR_START \
+ static const err_defn error_table[] = { \
+ { SVN_ERR_CDMLINE__WARNING, "Warning" },
+#define SVN_ERRDEF(n, s) { n, s },
+#define SVN_ERROR_END { 0, NULL } };
+
+#elif !defined(SVN_CMDLINE_ERROR_ENUM_DEFINED)
+
+#define SVN_ERROR_START \
+ typedef enum svn_client_errno_t { \
+ SVN_ERR_CDMLINE__WARNING = SVN_ERR_LAST + 1,
+#define SVN_ERRDEF(n, s) n,
+#define SVN_ERROR_END SVN_ERR_CMDLINE__ERR_LAST } svn_client_errno_t;
+
+#define SVN_CMDLINE_ERROR_ENUM_DEFINED
+
+#endif
+
+/* Define custom command line client error numbers */
+
+SVN_ERROR_START
+
+ /* BEGIN Client errors */
+
+SVN_ERRDEF(SVN_ERR_CMDLINE__TMPFILE_WRITE,
+ "Failed writing to temporary file.")
+
+ SVN_ERRDEF(SVN_ERR_CMDLINE__TMPFILE_STAT,
+ "Failed getting info about temporary file.")
+
+ SVN_ERRDEF(SVN_ERR_CMDLINE__TMPFILE_OPEN,
+ "Failed opening temporary file.")
+
+ /* END Client errors */
+
+
+SVN_ERROR_END
+
+#undef SVN_ERROR_START
+#undef SVN_ERRDEF
+#undef SVN_ERROR_END
+
+#endif /* SVN_ERROR_BUILD_ARRAY || !SVN_CMDLINE_ERROR_ENUM_DEFINED */
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* SVN_CLIENT_ERRORS_H */
diff --git a/tools/client-side/svn-bench/help-cmd.c b/tools/client-side/svn-bench/help-cmd.c
new file mode 100644
index 0000000..a3302ec
--- /dev/null
+++ b/tools/client-side/svn-bench/help-cmd.c
@@ -0,0 +1,94 @@
+/*
+ * help-cmd.c -- Provide help
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+/* ==================================================================== */
+
+
+
+/*** Includes. ***/
+
+#include "svn_string.h"
+#include "svn_error.h"
+#include "svn_version.h"
+#include "cl.h"
+
+#include "svn_private_config.h"
+
+
+/*** Code. ***/
+
+/* This implements the `svn_opt_subcommand_t' interface. */
+svn_error_t *
+svn_cl__help(apr_getopt_t *os,
+ void *baton,
+ apr_pool_t *pool)
+{
+ svn_cl__opt_state_t *opt_state;
+
+ /* xgettext: the %s is for SVN_VER_NUMBER. */
+ char help_header_template[] =
+ N_("usage: svn-bench <subcommand> [options] [args]\n"
+ "Subversion command-line client, version %s.\n"
+ "Type 'svn-bench help <subcommand>' for help on a specific subcommand.\n"
+ "Type 'svn-bench --version' to see the program version and RA modules\n"
+ " or 'svn-bench --version --quiet' to see just the version number.\n"
+ "\n"
+ "Most subcommands take file and/or directory arguments, recursing\n"
+ "on the directories. If no arguments are supplied to such a\n"
+ "command, it recurses on the current directory (inclusive) by default.\n"
+ "\n"
+ "Available subcommands:\n");
+
+ char help_footer[] =
+ N_("Subversion is a tool for version control.\n"
+ "For additional information, see http://subversion.apache.org/\n");
+
+ char *help_header =
+ apr_psprintf(pool, _(help_header_template), SVN_VER_NUMBER);
+
+ const char *ra_desc_start
+ = _("The following repository access (RA) modules are available:\n\n");
+
+ svn_stringbuf_t *version_footer;
+
+ if (baton)
+ opt_state = ((svn_cl__cmd_baton_t *) baton)->opt_state;
+ else
+ opt_state = NULL;
+
+ version_footer = svn_stringbuf_create(ra_desc_start, pool);
+ SVN_ERR(svn_ra_print_modules(version_footer, pool));
+
+ return svn_opt_print_help4(os,
+ "svn-bench", /* ### erm, derive somehow? */
+ opt_state ? opt_state->version : FALSE,
+ opt_state ? opt_state->quiet : FALSE,
+ opt_state ? opt_state->verbose : FALSE,
+ version_footer->data,
+ help_header, /* already gettext()'d */
+ svn_cl__cmd_table,
+ svn_cl__options,
+ svn_cl__global_options,
+ _(help_footer),
+ pool);
+}
diff --git a/tools/client-side/svn-bench/notify.c b/tools/client-side/svn-bench/notify.c
new file mode 100644
index 0000000..5e19d8a
--- /dev/null
+++ b/tools/client-side/svn-bench/notify.c
@@ -0,0 +1,1045 @@
+/*
+ * notify.c: feedback handlers for cmdline client.
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+/* ==================================================================== */
+
+
+
+/*** Includes. ***/
+
+#define APR_WANT_STDIO
+#define APR_WANT_STRFUNC
+#include <apr_want.h>
+
+#include "svn_cmdline.h"
+#include "svn_pools.h"
+#include "svn_dirent_uri.h"
+#include "svn_path.h"
+#include "svn_sorts.h"
+#include "cl.h"
+
+#include "svn_private_config.h"
+
+
+/* Baton for notify and friends. */
+struct notify_baton
+{
+ svn_boolean_t received_some_change;
+ svn_boolean_t is_checkout;
+ svn_boolean_t is_export;
+ svn_boolean_t is_wc_to_repos_copy;
+ svn_boolean_t sent_first_txdelta;
+ svn_boolean_t in_external;
+ svn_boolean_t had_print_error; /* Used to not keep printing error messages
+ when we've already had one print error. */
+
+ /* Conflict stats for update and merge. */
+ unsigned int text_conflicts;
+ unsigned int prop_conflicts;
+ unsigned int tree_conflicts;
+ unsigned int skipped_paths;
+ apr_hash_t *conflicted_paths;
+
+ /* The cwd, for use in decomposing absolute paths. */
+ const char *path_prefix;
+};
+
+
+/* Add a conflicted path to the list of conflicted paths stored
+ * in the notify baton. */
+static void
+add_conflicted_path(struct notify_baton *nb, const char *path)
+{
+ apr_hash_set(nb->conflicted_paths,
+ apr_pstrdup(apr_hash_pool_get(nb->conflicted_paths), path),
+ APR_HASH_KEY_STRING, "");
+}
+
+/* This implements `svn_wc_notify_func2_t'.
+ * NOTE: This function can't fail, so we just ignore any print errors. */
+static void
+notify(void *baton, const svn_wc_notify_t *n, apr_pool_t *pool)
+{
+ struct notify_baton *nb = baton;
+ char statchar_buf[5] = " ";
+ const char *path_local;
+ svn_error_t *err;
+
+ if (n->url)
+ path_local = n->url;
+ else
+ {
+ if (n->path_prefix)
+ path_local = svn_cl__local_style_skip_ancestor(n->path_prefix, n->path,
+ pool);
+ else /* skip nb->path_prefix, if it's non-null */
+ path_local = svn_cl__local_style_skip_ancestor(nb->path_prefix, n->path,
+ pool);
+ }
+
+ switch (n->action)
+ {
+ case svn_wc_notify_skip:
+ nb->skipped_paths++;
+ if (n->content_state == svn_wc_notify_state_missing)
+ {
+ if ((err = svn_cmdline_printf
+ (pool, _("Skipped missing target: '%s'\n"),
+ path_local)))
+ goto print_error;
+ }
+ else if (n->content_state == svn_wc_notify_state_source_missing)
+ {
+ if ((err = svn_cmdline_printf
+ (pool, _("Skipped target: '%s' -- copy-source is missing\n"),
+ path_local)))
+ goto print_error;
+ }
+ else
+ {
+ if ((err = svn_cmdline_printf
+ (pool, _("Skipped '%s'\n"), path_local)))
+ goto print_error;
+ }
+ break;
+ case svn_wc_notify_update_skip_obstruction:
+ nb->skipped_paths++;
+ if ((err = svn_cmdline_printf(
+ pool, _("Skipped '%s' -- An obstructing working copy was found\n"),
+ path_local)))
+ goto print_error;
+ break;
+ case svn_wc_notify_update_skip_working_only:
+ nb->skipped_paths++;
+ if ((err = svn_cmdline_printf(
+ pool, _("Skipped '%s' -- Has no versioned parent\n"),
+ path_local)))
+ goto print_error;
+ break;
+ case svn_wc_notify_update_skip_access_denied:
+ nb->skipped_paths++;
+ if ((err = svn_cmdline_printf(
+ pool, _("Skipped '%s' -- Access denied\n"),
+ path_local)))
+ goto print_error;
+ break;
+ case svn_wc_notify_skip_conflicted:
+ nb->skipped_paths++;
+ if ((err = svn_cmdline_printf(
+ pool, _("Skipped '%s' -- Node remains in conflict\n"),
+ path_local)))
+ goto print_error;
+ break;
+ case svn_wc_notify_update_delete:
+ case svn_wc_notify_exclude:
+ nb->received_some_change = TRUE;
+ if ((err = svn_cmdline_printf(pool, "D %s\n", path_local)))
+ goto print_error;
+ break;
+ case svn_wc_notify_update_broken_lock:
+ if ((err = svn_cmdline_printf(pool, "B %s\n", path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_update_external_removed:
+ nb->received_some_change = TRUE;
+ if (n->err && n->err->message)
+ {
+ if ((err = svn_cmdline_printf(pool, "Removed external '%s': %s\n",
+ path_local, n->err->message)))
+ goto print_error;
+ }
+ else
+ {
+ if ((err = svn_cmdline_printf(pool, "Removed external '%s'\n",
+ path_local)))
+ goto print_error;
+ }
+ break;
+
+ case svn_wc_notify_left_local_modifications:
+ if ((err = svn_cmdline_printf(pool, "Left local modifications as '%s'\n",
+ path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_update_replace:
+ nb->received_some_change = TRUE;
+ if ((err = svn_cmdline_printf(pool, "R %s\n", path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_update_add:
+ nb->received_some_change = TRUE;
+ if (n->content_state == svn_wc_notify_state_conflicted)
+ {
+ nb->text_conflicts++;
+ add_conflicted_path(nb, n->path);
+ if ((err = svn_cmdline_printf(pool, "C %s\n", path_local)))
+ goto print_error;
+ }
+ else
+ {
+ if ((err = svn_cmdline_printf(pool, "A %s\n", path_local)))
+ goto print_error;
+ }
+ break;
+
+ case svn_wc_notify_exists:
+ nb->received_some_change = TRUE;
+ if (n->content_state == svn_wc_notify_state_conflicted)
+ {
+ nb->text_conflicts++;
+ add_conflicted_path(nb, n->path);
+ statchar_buf[0] = 'C';
+ }
+ else
+ statchar_buf[0] = 'E';
+
+ if (n->prop_state == svn_wc_notify_state_conflicted)
+ {
+ nb->prop_conflicts++;
+ add_conflicted_path(nb, n->path);
+ statchar_buf[1] = 'C';
+ }
+ else if (n->prop_state == svn_wc_notify_state_merged)
+ statchar_buf[1] = 'G';
+
+ if ((err = svn_cmdline_printf(pool, "%s %s\n", statchar_buf, path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_restore:
+ if ((err = svn_cmdline_printf(pool, _("Restored '%s'\n"),
+ path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_revert:
+ if ((err = svn_cmdline_printf(pool, _("Reverted '%s'\n"),
+ path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_failed_revert:
+ if (( err = svn_cmdline_printf(pool, _("Failed to revert '%s' -- "
+ "try updating instead.\n"),
+ path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_resolved:
+ if ((err = svn_cmdline_printf(pool,
+ _("Resolved conflicted state of '%s'\n"),
+ path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_add:
+ /* We *should* only get the MIME_TYPE if PATH is a file. If we
+ do get it, and the mime-type is not textual, note that this
+ is a binary addition. */
+ if (n->mime_type && (svn_mime_type_is_binary(n->mime_type)))
+ {
+ if ((err = svn_cmdline_printf(pool, "A (bin) %s\n",
+ path_local)))
+ goto print_error;
+ }
+ else
+ {
+ if ((err = svn_cmdline_printf(pool, "A %s\n",
+ path_local)))
+ goto print_error;
+ }
+ break;
+
+ case svn_wc_notify_delete:
+ nb->received_some_change = TRUE;
+ if ((err = svn_cmdline_printf(pool, "D %s\n",
+ path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_patch:
+ {
+ nb->received_some_change = TRUE;
+ if (n->content_state == svn_wc_notify_state_conflicted)
+ {
+ nb->text_conflicts++;
+ add_conflicted_path(nb, n->path);
+ statchar_buf[0] = 'C';
+ }
+ else if (n->kind == svn_node_file)
+ {
+ if (n->content_state == svn_wc_notify_state_merged)
+ statchar_buf[0] = 'G';
+ else if (n->content_state == svn_wc_notify_state_changed)
+ statchar_buf[0] = 'U';
+ }
+
+ if (n->prop_state == svn_wc_notify_state_conflicted)
+ {
+ nb->prop_conflicts++;
+ add_conflicted_path(nb, n->path);
+ statchar_buf[1] = 'C';
+ }
+ else if (n->prop_state == svn_wc_notify_state_changed)
+ statchar_buf[1] = 'U';
+
+ if (statchar_buf[0] != ' ' || statchar_buf[1] != ' ')
+ {
+ if ((err = svn_cmdline_printf(pool, "%s %s\n",
+ statchar_buf, path_local)))
+ goto print_error;
+ }
+ }
+ break;
+
+ case svn_wc_notify_patch_applied_hunk:
+ nb->received_some_change = TRUE;
+ if (n->hunk_original_start != n->hunk_matched_line)
+ {
+ apr_uint64_t off;
+ const char *s;
+ const char *minus;
+
+ if (n->hunk_matched_line > n->hunk_original_start)
+ {
+ off = n->hunk_matched_line - n->hunk_original_start;
+ minus = "";
+ }
+ else
+ {
+ off = n->hunk_original_start - n->hunk_matched_line;
+ minus = "-";
+ }
+
+ /* ### We're creating the localized strings without
+ * ### APR_INT64_T_FMT since it isn't translator-friendly */
+ if (n->hunk_fuzz)
+ {
+
+ if (n->prop_name)
+ {
+ s = _("> applied hunk ## -%lu,%lu +%lu,%lu ## "
+ "with offset %s");
+
+ err = svn_cmdline_printf(pool,
+ apr_pstrcat(pool, s,
+ "%"APR_UINT64_T_FMT
+ " and fuzz %lu (%s)\n",
+ (char *)NULL),
+ n->hunk_original_start,
+ n->hunk_original_length,
+ n->hunk_modified_start,
+ n->hunk_modified_length,
+ minus, off, n->hunk_fuzz,
+ n->prop_name);
+ }
+ else
+ {
+ s = _("> applied hunk @@ -%lu,%lu +%lu,%lu @@ "
+ "with offset %s");
+
+ err = svn_cmdline_printf(pool,
+ apr_pstrcat(pool, s,
+ "%"APR_UINT64_T_FMT
+ " and fuzz %lu\n",
+ (char *)NULL),
+ n->hunk_original_start,
+ n->hunk_original_length,
+ n->hunk_modified_start,
+ n->hunk_modified_length,
+ minus, off, n->hunk_fuzz);
+ }
+
+ if (err)
+ goto print_error;
+ }
+ else
+ {
+
+ if (n->prop_name)
+ {
+ s = _("> applied hunk ## -%lu,%lu +%lu,%lu ## "
+ "with offset %s");
+ err = svn_cmdline_printf(pool,
+ apr_pstrcat(pool, s,
+ "%"APR_UINT64_T_FMT" (%s)\n",
+ (char *)NULL),
+ n->hunk_original_start,
+ n->hunk_original_length,
+ n->hunk_modified_start,
+ n->hunk_modified_length,
+ minus, off, n->prop_name);
+ }
+ else
+ {
+ s = _("> applied hunk @@ -%lu,%lu +%lu,%lu @@ "
+ "with offset %s");
+ err = svn_cmdline_printf(pool,
+ apr_pstrcat(pool, s,
+ "%"APR_UINT64_T_FMT"\n",
+ (char *)NULL),
+ n->hunk_original_start,
+ n->hunk_original_length,
+ n->hunk_modified_start,
+ n->hunk_modified_length,
+ minus, off);
+ }
+
+ if (err)
+ goto print_error;
+ }
+ }
+ else if (n->hunk_fuzz)
+ {
+ if (n->prop_name)
+ err = svn_cmdline_printf(pool,
+ _("> applied hunk ## -%lu,%lu +%lu,%lu ## "
+ "with fuzz %lu (%s)\n"),
+ n->hunk_original_start,
+ n->hunk_original_length,
+ n->hunk_modified_start,
+ n->hunk_modified_length,
+ n->hunk_fuzz,
+ n->prop_name);
+ else
+ err = svn_cmdline_printf(pool,
+ _("> applied hunk @@ -%lu,%lu +%lu,%lu @@ "
+ "with fuzz %lu\n"),
+ n->hunk_original_start,
+ n->hunk_original_length,
+ n->hunk_modified_start,
+ n->hunk_modified_length,
+ n->hunk_fuzz);
+ if (err)
+ goto print_error;
+
+ }
+ break;
+
+ case svn_wc_notify_patch_rejected_hunk:
+ nb->received_some_change = TRUE;
+
+ if (n->prop_name)
+ err = svn_cmdline_printf(pool,
+ _("> rejected hunk "
+ "## -%lu,%lu +%lu,%lu ## (%s)\n"),
+ n->hunk_original_start,
+ n->hunk_original_length,
+ n->hunk_modified_start,
+ n->hunk_modified_length,
+ n->prop_name);
+ else
+ err = svn_cmdline_printf(pool,
+ _("> rejected hunk "
+ "@@ -%lu,%lu +%lu,%lu @@\n"),
+ n->hunk_original_start,
+ n->hunk_original_length,
+ n->hunk_modified_start,
+ n->hunk_modified_length);
+ if (err)
+ goto print_error;
+ break;
+
+ case svn_wc_notify_patch_hunk_already_applied:
+ nb->received_some_change = TRUE;
+ if (n->prop_name)
+ err = svn_cmdline_printf(pool,
+ _("> hunk "
+ "## -%lu,%lu +%lu,%lu ## "
+ "already applied (%s)\n"),
+ n->hunk_original_start,
+ n->hunk_original_length,
+ n->hunk_modified_start,
+ n->hunk_modified_length,
+ n->prop_name);
+ else
+ err = svn_cmdline_printf(pool,
+ _("> hunk "
+ "@@ -%lu,%lu +%lu,%lu @@ "
+ "already applied\n"),
+ n->hunk_original_start,
+ n->hunk_original_length,
+ n->hunk_modified_start,
+ n->hunk_modified_length);
+ if (err)
+ goto print_error;
+ break;
+
+ case svn_wc_notify_update_update:
+ case svn_wc_notify_merge_record_info:
+ {
+ if (n->content_state == svn_wc_notify_state_conflicted)
+ {
+ nb->text_conflicts++;
+ add_conflicted_path(nb, n->path);
+ statchar_buf[0] = 'C';
+ }
+ else if (n->kind == svn_node_file)
+ {
+ if (n->content_state == svn_wc_notify_state_merged)
+ statchar_buf[0] = 'G';
+ else if (n->content_state == svn_wc_notify_state_changed)
+ statchar_buf[0] = 'U';
+ }
+
+ if (n->prop_state == svn_wc_notify_state_conflicted)
+ {
+ nb->prop_conflicts++;
+ add_conflicted_path(nb, n->path);
+ statchar_buf[1] = 'C';
+ }
+ else if (n->prop_state == svn_wc_notify_state_merged)
+ statchar_buf[1] = 'G';
+ else if (n->prop_state == svn_wc_notify_state_changed)
+ statchar_buf[1] = 'U';
+
+ if (n->lock_state == svn_wc_notify_lock_state_unlocked)
+ statchar_buf[2] = 'B';
+
+ if (statchar_buf[0] != ' ' || statchar_buf[1] != ' ')
+ nb->received_some_change = TRUE;
+
+ if (statchar_buf[0] != ' ' || statchar_buf[1] != ' '
+ || statchar_buf[2] != ' ')
+ {
+ if ((err = svn_cmdline_printf(pool, "%s %s\n",
+ statchar_buf, path_local)))
+ goto print_error;
+ }
+ }
+ break;
+
+ case svn_wc_notify_update_external:
+ /* Remember that we're now "inside" an externals definition. */
+ nb->in_external = TRUE;
+
+ /* Currently this is used for checkouts and switches too. If we
+ want different output, we'll have to add new actions. */
+ if ((err = svn_cmdline_printf(pool,
+ _("\nFetching external item into '%s':\n"),
+ path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_failed_external:
+ /* If we are currently inside the handling of an externals
+ definition, then we can simply present n->err as a warning
+ and feel confident that after this, we aren't handling that
+ externals definition any longer. */
+ if (nb->in_external)
+ {
+ svn_handle_warning2(stderr, n->err, "svn: ");
+ nb->in_external = FALSE;
+ if ((err = svn_cmdline_printf(pool, "\n")))
+ goto print_error;
+ }
+ /* Otherwise, we'll just print two warnings. Why? Because
+ svn_handle_warning2() only shows the single "best message",
+ but we have two pretty important ones: that the external at
+ '/some/path' didn't pan out, and then the more specific
+ reason why (from n->err). */
+ else
+ {
+ svn_error_t *warn_err =
+ svn_error_createf(SVN_ERR_BASE, NULL,
+ _("Error handling externals definition for '%s':"),
+ path_local);
+ svn_handle_warning2(stderr, warn_err, "svn: ");
+ svn_error_clear(warn_err);
+ svn_handle_warning2(stderr, n->err, "svn: ");
+ }
+ break;
+
+ case svn_wc_notify_update_started:
+ if (! (nb->in_external ||
+ nb->is_checkout ||
+ nb->is_export))
+ {
+ if ((err = svn_cmdline_printf(pool, _("Updating '%s':\n"),
+ path_local)))
+ goto print_error;
+ }
+ break;
+
+ case svn_wc_notify_update_completed:
+ {
+ if (SVN_IS_VALID_REVNUM(n->revision))
+ {
+ if (nb->is_export)
+ {
+ if ((err = svn_cmdline_printf
+ (pool, nb->in_external
+ ? _("Exported external at revision %ld.\n")
+ : _("Exported revision %ld.\n"),
+ n->revision)))
+ goto print_error;
+ }
+ else if (nb->is_checkout)
+ {
+ if ((err = svn_cmdline_printf
+ (pool, nb->in_external
+ ? _("Checked out external at revision %ld.\n")
+ : _("Checked out revision %ld.\n"),
+ n->revision)))
+ goto print_error;
+ }
+ else
+ {
+ if (nb->received_some_change)
+ {
+ nb->received_some_change = FALSE;
+ if ((err = svn_cmdline_printf
+ (pool, nb->in_external
+ ? _("Updated external to revision %ld.\n")
+ : _("Updated to revision %ld.\n"),
+ n->revision)))
+ goto print_error;
+ }
+ else
+ {
+ if ((err = svn_cmdline_printf
+ (pool, nb->in_external
+ ? _("External at revision %ld.\n")
+ : _("At revision %ld.\n"),
+ n->revision)))
+ goto print_error;
+ }
+ }
+ }
+ else /* no revision */
+ {
+ if (nb->is_export)
+ {
+ if ((err = svn_cmdline_printf
+ (pool, nb->in_external
+ ? _("External export complete.\n")
+ : _("Export complete.\n"))))
+ goto print_error;
+ }
+ else if (nb->is_checkout)
+ {
+ if ((err = svn_cmdline_printf
+ (pool, nb->in_external
+ ? _("External checkout complete.\n")
+ : _("Checkout complete.\n"))))
+ goto print_error;
+ }
+ else
+ {
+ if ((err = svn_cmdline_printf
+ (pool, nb->in_external
+ ? _("External update complete.\n")
+ : _("Update complete.\n"))))
+ goto print_error;
+ }
+ }
+ }
+
+ if (nb->in_external)
+ {
+ nb->in_external = FALSE;
+ if ((err = svn_cmdline_printf(pool, "\n")))
+ goto print_error;
+ }
+ break;
+
+ case svn_wc_notify_status_external:
+ if ((err = svn_cmdline_printf
+ (pool, _("\nPerforming status on external item at '%s':\n"),
+ path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_status_completed:
+ if (SVN_IS_VALID_REVNUM(n->revision))
+ if ((err = svn_cmdline_printf(pool,
+ _("Status against revision: %6ld\n"),
+ n->revision)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_commit_modified:
+ /* xgettext: Align the %s's on this and the following 4 messages */
+ if ((err = svn_cmdline_printf(pool,
+ nb->is_wc_to_repos_copy
+ ? _("Sending copy of %s\n")
+ : _("Sending %s\n"),
+ path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_commit_added:
+ case svn_wc_notify_commit_copied:
+ if (n->mime_type && svn_mime_type_is_binary(n->mime_type))
+ {
+ if ((err = svn_cmdline_printf(pool,
+ nb->is_wc_to_repos_copy
+ ? _("Adding copy of (bin) %s\n")
+ : _("Adding (bin) %s\n"),
+ path_local)))
+ goto print_error;
+ }
+ else
+ {
+ if ((err = svn_cmdline_printf(pool,
+ nb->is_wc_to_repos_copy
+ ? _("Adding copy of %s\n")
+ : _("Adding %s\n"),
+ path_local)))
+ goto print_error;
+ }
+ break;
+
+ case svn_wc_notify_commit_deleted:
+ if ((err = svn_cmdline_printf(pool,
+ nb->is_wc_to_repos_copy
+ ? _("Deleting copy of %s\n")
+ : _("Deleting %s\n"),
+ path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_commit_replaced:
+ case svn_wc_notify_commit_copied_replaced:
+ if ((err = svn_cmdline_printf(pool,
+ nb->is_wc_to_repos_copy
+ ? _("Replacing copy of %s\n")
+ : _("Replacing %s\n"),
+ path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_commit_postfix_txdelta:
+ if (! nb->sent_first_txdelta)
+ {
+ nb->sent_first_txdelta = TRUE;
+ if ((err = svn_cmdline_printf(pool,
+ _("Transmitting file data "))))
+ goto print_error;
+ }
+
+ if ((err = svn_cmdline_printf(pool, ".")))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_locked:
+ if ((err = svn_cmdline_printf(pool, _("'%s' locked by user '%s'.\n"),
+ path_local, n->lock->owner)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_unlocked:
+ if ((err = svn_cmdline_printf(pool, _("'%s' unlocked.\n"),
+ path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_failed_lock:
+ case svn_wc_notify_failed_unlock:
+ svn_handle_warning2(stderr, n->err, "svn: ");
+ break;
+
+ case svn_wc_notify_changelist_set:
+ if ((err = svn_cmdline_printf(pool, "A [%s] %s\n",
+ n->changelist_name, path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_changelist_clear:
+ case svn_wc_notify_changelist_moved:
+ if ((err = svn_cmdline_printf(pool,
+ "D [%s] %s\n",
+ n->changelist_name, path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_merge_begin:
+ if (n->merge_range == NULL)
+ err = svn_cmdline_printf(pool,
+ _("--- Merging differences between "
+ "repository URLs into '%s':\n"),
+ path_local);
+ else if (n->merge_range->start == n->merge_range->end - 1
+ || n->merge_range->start == n->merge_range->end)
+ err = svn_cmdline_printf(pool, _("--- Merging r%ld into '%s':\n"),
+ n->merge_range->end, path_local);
+ else if (n->merge_range->start - 1 == n->merge_range->end)
+ err = svn_cmdline_printf(pool,
+ _("--- Reverse-merging r%ld into '%s':\n"),
+ n->merge_range->start, path_local);
+ else if (n->merge_range->start < n->merge_range->end)
+ err = svn_cmdline_printf(pool,
+ _("--- Merging r%ld through r%ld into "
+ "'%s':\n"),
+ n->merge_range->start + 1,
+ n->merge_range->end, path_local);
+ else /* n->merge_range->start > n->merge_range->end - 1 */
+ err = svn_cmdline_printf(pool,
+ _("--- Reverse-merging r%ld through r%ld "
+ "into '%s':\n"),
+ n->merge_range->start,
+ n->merge_range->end + 1, path_local);
+ if (err)
+ goto print_error;
+ break;
+
+ case svn_wc_notify_merge_record_info_begin:
+ if (!n->merge_range)
+ {
+ err = svn_cmdline_printf(pool,
+ _("--- Recording mergeinfo for merge "
+ "between repository URLs into '%s':\n"),
+ path_local);
+ }
+ else
+ {
+ if (n->merge_range->start == n->merge_range->end - 1
+ || n->merge_range->start == n->merge_range->end)
+ err = svn_cmdline_printf(
+ pool,
+ _("--- Recording mergeinfo for merge of r%ld into '%s':\n"),
+ n->merge_range->end, path_local);
+ else if (n->merge_range->start - 1 == n->merge_range->end)
+ err = svn_cmdline_printf(
+ pool,
+ _("--- Recording mergeinfo for reverse merge of r%ld into '%s':\n"),
+ n->merge_range->start, path_local);
+ else if (n->merge_range->start < n->merge_range->end)
+ err = svn_cmdline_printf(
+ pool,
+ _("--- Recording mergeinfo for merge of r%ld through r%ld into '%s':\n"),
+ n->merge_range->start + 1, n->merge_range->end, path_local);
+ else /* n->merge_range->start > n->merge_range->end - 1 */
+ err = svn_cmdline_printf(
+ pool,
+ _("--- Recording mergeinfo for reverse merge of r%ld through r%ld into '%s':\n"),
+ n->merge_range->start, n->merge_range->end + 1, path_local);
+ }
+
+ if (err)
+ goto print_error;
+ break;
+
+ case svn_wc_notify_merge_elide_info:
+ if ((err = svn_cmdline_printf(pool,
+ _("--- Eliding mergeinfo from '%s':\n"),
+ path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_foreign_merge_begin:
+ if (n->merge_range == NULL)
+ err = svn_cmdline_printf(pool,
+ _("--- Merging differences between "
+ "foreign repository URLs into '%s':\n"),
+ path_local);
+ else if (n->merge_range->start == n->merge_range->end - 1
+ || n->merge_range->start == n->merge_range->end)
+ err = svn_cmdline_printf(pool,
+ _("--- Merging (from foreign repository) "
+ "r%ld into '%s':\n"),
+ n->merge_range->end, path_local);
+ else if (n->merge_range->start - 1 == n->merge_range->end)
+ err = svn_cmdline_printf(pool,
+ _("--- Reverse-merging (from foreign "
+ "repository) r%ld into '%s':\n"),
+ n->merge_range->start, path_local);
+ else if (n->merge_range->start < n->merge_range->end)
+ err = svn_cmdline_printf(pool,
+ _("--- Merging (from foreign repository) "
+ "r%ld through r%ld into '%s':\n"),
+ n->merge_range->start + 1,
+ n->merge_range->end, path_local);
+ else /* n->merge_range->start > n->merge_range->end - 1 */
+ err = svn_cmdline_printf(pool,
+ _("--- Reverse-merging (from foreign "
+ "repository) r%ld through r%ld into "
+ "'%s':\n"),
+ n->merge_range->start,
+ n->merge_range->end + 1, path_local);
+ if (err)
+ goto print_error;
+ break;
+
+ case svn_wc_notify_tree_conflict:
+ nb->tree_conflicts++;
+ add_conflicted_path(nb, n->path);
+ if ((err = svn_cmdline_printf(pool, " C %s\n", path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_update_shadowed_add:
+ nb->received_some_change = TRUE;
+ if ((err = svn_cmdline_printf(pool, " A %s\n", path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_update_shadowed_update:
+ nb->received_some_change = TRUE;
+ if ((err = svn_cmdline_printf(pool, " U %s\n", path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_update_shadowed_delete:
+ nb->received_some_change = TRUE;
+ if ((err = svn_cmdline_printf(pool, " D %s\n", path_local)))
+ goto print_error;
+ break;
+
+ case svn_wc_notify_property_modified:
+ case svn_wc_notify_property_added:
+ err = svn_cmdline_printf(pool,
+ _("property '%s' set on '%s'\n"),
+ n->prop_name, path_local);
+ if (err)
+ goto print_error;
+ break;
+
+ case svn_wc_notify_property_deleted:
+ err = svn_cmdline_printf(pool,
+ _("property '%s' deleted from '%s'.\n"),
+ n->prop_name, path_local);
+ if (err)
+ goto print_error;
+ break;
+
+ case svn_wc_notify_property_deleted_nonexistent:
+ err = svn_cmdline_printf(pool,
+ _("Attempting to delete nonexistent "
+ "property '%s' on '%s'\n"), n->prop_name,
+ path_local);
+ if (err)
+ goto print_error;
+ break;
+
+ case svn_wc_notify_revprop_set:
+ err = svn_cmdline_printf(pool,
+ _("property '%s' set on repository revision %ld\n"),
+ n->prop_name, n->revision);
+ if (err)
+ goto print_error;
+ break;
+
+ case svn_wc_notify_revprop_deleted:
+ err = svn_cmdline_printf(pool,
+ _("property '%s' deleted from repository revision %ld\n"),
+ n->prop_name, n->revision);
+ if (err)
+ goto print_error;
+ break;
+
+ case svn_wc_notify_upgraded_path:
+ err = svn_cmdline_printf(pool, _("Upgraded '%s'\n"), path_local);
+ if (err)
+ goto print_error;
+ break;
+
+ case svn_wc_notify_url_redirect:
+ err = svn_cmdline_printf(pool, _("Redirecting to URL '%s':\n"),
+ n->url);
+ if (err)
+ goto print_error;
+ break;
+
+ case svn_wc_notify_path_nonexistent:
+ err = svn_cmdline_printf(pool, _("'%s' is not under version control"),
+ path_local);
+ if (err)
+ goto print_error;
+ break;
+
+ case svn_wc_notify_conflict_resolver_starting:
+ /* Once all operations invoke the interactive conflict resolution after
+ * they've completed, we can run svn_cl__print_conflict_stats() here. */
+ break;
+
+ case svn_wc_notify_conflict_resolver_done:
+ break;
+
+ default:
+ break;
+ }
+
+ if ((err = svn_cmdline_fflush(stdout)))
+ goto print_error;
+
+ return;
+
+ print_error:
+ /* If we had no errors before, print this error to stderr. Else, don't print
+ anything. The user already knows there were some output errors,
+ so there is no point in flooding her with an error per notification. */
+ if (!nb->had_print_error)
+ {
+ nb->had_print_error = TRUE;
+ /* Issue #3014:
+ * Don't print anything on broken pipes. The pipe was likely
+ * closed by the process at the other end. We expect that
+ * process to perform error reporting as necessary.
+ *
+ * ### This assumes that there is only one error in a chain for
+ * ### SVN_ERR_IO_PIPE_WRITE_ERROR. See svn_cmdline_fputs(). */
+ if (err->apr_err != SVN_ERR_IO_PIPE_WRITE_ERROR)
+ svn_handle_error2(err, stderr, FALSE, "svn: ");
+ }
+ svn_error_clear(err);
+}
+
+
+svn_error_t *
+svn_cl__get_notifier(svn_wc_notify_func2_t *notify_func_p,
+ void **notify_baton_p,
+ apr_pool_t *pool)
+{
+ struct notify_baton *nb = apr_pcalloc(pool, sizeof(*nb));
+
+ nb->received_some_change = FALSE;
+ nb->sent_first_txdelta = FALSE;
+ nb->is_checkout = FALSE;
+ nb->is_export = FALSE;
+ nb->is_wc_to_repos_copy = FALSE;
+ nb->in_external = FALSE;
+ nb->had_print_error = FALSE;
+ nb->text_conflicts = 0;
+ nb->prop_conflicts = 0;
+ nb->tree_conflicts = 0;
+ nb->skipped_paths = 0;
+ nb->conflicted_paths = apr_hash_make(pool);
+ SVN_ERR(svn_dirent_get_absolute(&nb->path_prefix, "", pool));
+
+ *notify_func_p = notify;
+ *notify_baton_p = nb;
+ return SVN_NO_ERROR;
+}
+
+svn_error_t *
+svn_cl__notifier_mark_export(void *baton)
+{
+ struct notify_baton *nb = baton;
+
+ nb->is_export = TRUE;
+ return SVN_NO_ERROR;
+}
diff --git a/tools/client-side/svn-bench/null-export-cmd.c b/tools/client-side/svn-bench/null-export-cmd.c
new file mode 100644
index 0000000..8220bfb
--- /dev/null
+++ b/tools/client-side/svn-bench/null-export-cmd.c
@@ -0,0 +1,346 @@
+/*
+ * export-cmd.c -- Subversion export command
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+/* ==================================================================== */
+
+
+
+/*** Includes. ***/
+
+#include "svn_client.h"
+#include "svn_error.h"
+#include "svn_dirent_uri.h"
+#include "svn_path.h"
+#include "svn_cmdline.h"
+#include "cl.h"
+
+#include "svn_private_config.h"
+#include "private/svn_string_private.h"
+#include "private/svn_client_private.h"
+
+/*** The export editor code. ***/
+
+/* ---------------------------------------------------------------------- */
+
+/*** A dedicated 'export' editor, which does no .svn/ accounting. ***/
+
+typedef struct edit_baton_t
+{
+ apr_int64_t file_count;
+ apr_int64_t dir_count;
+ apr_int64_t byte_count;
+ apr_int64_t prop_count;
+ apr_int64_t prop_byte_count;
+} edit_baton_t;
+
+static svn_error_t *
+set_target_revision(void *edit_baton,
+ svn_revnum_t target_revision,
+ apr_pool_t *pool)
+{
+ return SVN_NO_ERROR;
+}
+
+
+/* The null export creates nothing; just hand the edit baton through. */
+static svn_error_t *
+open_root(void *edit_baton,
+ svn_revnum_t base_revision,
+ apr_pool_t *pool,
+ void **root_baton)
+{
+ *root_baton = edit_baton;
+ return SVN_NO_ERROR;
+}
+
+
+/* Count the directory; no directory is actually created. */
+static svn_error_t *
+add_directory(const char *path,
+ void *parent_baton,
+ const char *copyfrom_path,
+ svn_revnum_t copyfrom_revision,
+ apr_pool_t *pool,
+ void **baton)
+{
+ edit_baton_t *eb = parent_baton;
+ eb->dir_count++;
+
+ *baton = parent_baton;
+ return SVN_NO_ERROR;
+}
+
+
+/* Count the file; the parent baton doubles as the file baton. */
+static svn_error_t *
+add_file(const char *path,
+ void *parent_baton,
+ const char *copyfrom_path,
+ svn_revnum_t copyfrom_revision,
+ apr_pool_t *pool,
+ void **baton)
+{
+ edit_baton_t *eb = parent_baton;
+ eb->file_count++;
+
+ *baton = parent_baton;
+ return SVN_NO_ERROR;
+}
+
+static svn_error_t *
+window_handler(svn_txdelta_window_t *window, void *baton)
+{
+ edit_baton_t *eb = baton;
+ if (window != NULL)
+ eb->byte_count += window->tview_len;
+
+ return SVN_NO_ERROR;
+}
+
+/* Install a window handler that merely counts the incoming bytes */
+
+static svn_error_t *
+apply_textdelta(void *file_baton,
+ const char *base_checksum,
+ apr_pool_t *pool,
+ svn_txdelta_window_handler_t *handler,
+ void **handler_baton)
+{
+ *handler_baton = file_baton;
+ *handler = window_handler;
+
+ return SVN_NO_ERROR;
+}
+
+static svn_error_t *
+change_file_prop(void *file_baton,
+ const char *name,
+ const svn_string_t *value,
+ apr_pool_t *pool)
+{
+ edit_baton_t *eb = file_baton;
+ eb->prop_count++;
+ eb->prop_byte_count += value->len;
+
+ return SVN_NO_ERROR;
+}
+
+static svn_error_t *
+change_dir_prop(void *dir_baton,
+ const char *name,
+ const svn_string_t *value,
+ apr_pool_t *pool)
+{
+ edit_baton_t *eb = dir_baton;
+ eb->prop_count++;
+
+ return SVN_NO_ERROR;
+}
+
+static svn_error_t *
+close_file(void *file_baton,
+ const char *text_checksum,
+ apr_pool_t *pool)
+{
+ return SVN_NO_ERROR;
+}
+
+
+/*** Public Interfaces ***/
+
+static svn_error_t *
+bench_null_export(svn_revnum_t *result_rev,
+ const char *from_path_or_url,
+ svn_opt_revision_t *peg_revision,
+ svn_opt_revision_t *revision,
+ svn_depth_t depth,
+ void *baton,
+ svn_client_ctx_t *ctx,
+ svn_boolean_t quiet,
+ apr_pool_t *pool)
+{
+ svn_revnum_t edit_revision = SVN_INVALID_REVNUM;
+ svn_boolean_t from_is_url = svn_path_is_url(from_path_or_url);
+
+ SVN_ERR_ASSERT(peg_revision != NULL);
+ SVN_ERR_ASSERT(revision != NULL);
+
+ if (peg_revision->kind == svn_opt_revision_unspecified)
+ peg_revision->kind = svn_path_is_url(from_path_or_url)
+ ? svn_opt_revision_head
+ : svn_opt_revision_working;
+
+ if (revision->kind == svn_opt_revision_unspecified)
+ revision = peg_revision;
+
+ if (from_is_url || ! SVN_CLIENT__REVKIND_IS_LOCAL_TO_WC(revision->kind))
+ {
+ svn_client__pathrev_t *loc;
+ svn_ra_session_t *ra_session;
+ svn_node_kind_t kind;
+
+ /* Get the RA connection. */
+ SVN_ERR(svn_client__ra_session_from_path2(&ra_session, &loc,
+ from_path_or_url, NULL,
+ peg_revision,
+ revision, ctx, pool));
+
+ SVN_ERR(svn_ra_check_path(ra_session, "", loc->rev, &kind, pool));
+
+ if (kind == svn_node_file)
+ {
+ apr_hash_t *props;
+
+ /* Since you cannot actually root an editor at a file, we
+ * manually drive a few functions of our editor. */
+
+ /* Step outside the editor-likeness for a moment, to actually talk
+ * to the repository. */
+ /* ### note: the stream will not be closed */
+ SVN_ERR(svn_ra_get_file(ra_session, "", loc->rev,
+ svn_stream_empty(pool),
+ NULL, &props, pool));
+ }
+ else if (kind == svn_node_dir)
+ {
+ void *edit_baton = NULL;
+ const svn_delta_editor_t *export_editor = NULL;
+ const svn_ra_reporter3_t *reporter;
+ void *report_baton;
+
+ svn_delta_editor_t *editor = svn_delta_default_editor(pool);
+
+ editor->set_target_revision = set_target_revision;
+ editor->open_root = open_root;
+ editor->add_directory = add_directory;
+ editor->add_file = add_file;
+ editor->apply_textdelta = apply_textdelta;
+ editor->close_file = close_file;
+ editor->change_file_prop = change_file_prop;
+ editor->change_dir_prop = change_dir_prop;
+
+      /* for ra_svn, we don't need an editor in quiet mode */
+ if (!quiet || strncmp(loc->repos_root_url, "svn:", 4))
+ SVN_ERR(svn_delta_get_cancellation_editor(ctx->cancel_func,
+ ctx->cancel_baton,
+ editor,
+ baton,
+ &export_editor,
+ &edit_baton,
+ pool));
+
+ /* Manufacture a basic 'report' to the update reporter. */
+ SVN_ERR(svn_ra_do_update3(ra_session,
+ &reporter, &report_baton,
+ loc->rev,
+ "", /* no sub-target */
+ depth,
+ FALSE, /* don't want copyfrom-args */
+ FALSE, /* don't want ignore_ancestry */
+ export_editor, edit_baton,
+ pool, pool));
+
+ SVN_ERR(reporter->set_path(report_baton, "", loc->rev,
+ /* Depth is irrelevant, as we're
+ passing start_empty=TRUE anyway. */
+ svn_depth_infinity,
+ TRUE, /* "help, my dir is empty!" */
+ NULL, pool));
+
+ SVN_ERR(reporter->finish_report(report_baton, pool));
+ }
+ else if (kind == svn_node_none)
+ {
+ return svn_error_createf(SVN_ERR_RA_ILLEGAL_URL, NULL,
+ _("URL '%s' doesn't exist"),
+ from_path_or_url);
+ }
+ /* kind == svn_node_unknown not handled */
+ }
+
+
+ if (result_rev)
+ *result_rev = edit_revision;
+
+ return SVN_NO_ERROR;
+}
+
+
+/*** Code. ***/
+
+/* This implements the `svn_opt_subcommand_t' interface. */
+svn_error_t *
+svn_cl__null_export(apr_getopt_t *os,
+ void *baton,
+ apr_pool_t *pool)
+{
+ svn_cl__opt_state_t *opt_state = ((svn_cl__cmd_baton_t *) baton)->opt_state;
+ svn_client_ctx_t *ctx = ((svn_cl__cmd_baton_t *) baton)->ctx;
+ const char *from;
+ apr_array_header_t *targets;
+ svn_error_t *err;
+ svn_opt_revision_t peg_revision;
+ const char *truefrom;
+ edit_baton_t eb = { 0 };
+
+ SVN_ERR(svn_cl__args_to_target_array_print_reserved(&targets, os,
+ opt_state->targets,
+ ctx, FALSE, pool));
+
+ /* We want exactly 1 or 2 targets for this subcommand. */
+ if (targets->nelts < 1)
+ return svn_error_create(SVN_ERR_CL_INSUFFICIENT_ARGS, 0, NULL);
+ if (targets->nelts > 2)
+ return svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, 0, NULL);
+
+ /* The first target is the `from' path. */
+ from = APR_ARRAY_IDX(targets, 0, const char *);
+
+ /* Get the peg revision if present. */
+ SVN_ERR(svn_opt_parse_path(&peg_revision, &truefrom, from, pool));
+
+ if (opt_state->depth == svn_depth_unknown)
+ opt_state->depth = svn_depth_infinity;
+
+ /* Do the export. */
+ err = bench_null_export(NULL, truefrom, &peg_revision,
+ &(opt_state->start_revision),
+ opt_state->depth,
+ &eb,
+ ctx, opt_state->quiet, pool);
+
+ if (!opt_state->quiet)
+ SVN_ERR(svn_cmdline_printf(pool,
+ _("%15s directories\n"
+ "%15s files\n"
+ "%15s bytes in files\n"
+ "%15s properties\n"
+ "%15s bytes in properties\n"),
+ svn__ui64toa_sep(eb.dir_count, ',', pool),
+ svn__ui64toa_sep(eb.file_count, ',', pool),
+ svn__ui64toa_sep(eb.byte_count, ',', pool),
+ svn__ui64toa_sep(eb.prop_count, ',', pool),
+ svn__ui64toa_sep(eb.prop_byte_count, ',', pool)));
+
+ return svn_error_trace(err);
+}
diff --git a/tools/client-side/svn-bench/null-list-cmd.c b/tools/client-side/svn-bench/null-list-cmd.c
new file mode 100644
index 0000000..8aa08cd
--- /dev/null
+++ b/tools/client-side/svn-bench/null-list-cmd.c
@@ -0,0 +1,169 @@
+/*
+ * list-cmd.c -- list a URL
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include "svn_cmdline.h"
+#include "svn_client.h"
+#include "svn_error.h"
+#include "svn_pools.h"
+#include "svn_time.h"
+#include "svn_xml.h"
+#include "svn_dirent_uri.h"
+#include "svn_path.h"
+#include "svn_utf.h"
+#include "svn_opt.h"
+
+#include "cl.h"
+
+#include "svn_private_config.h"
+#include "private/svn_string_private.h"
+
+
+
+/* Baton used when printing directory entries. */
+struct print_baton {
+ svn_boolean_t verbose;
+ apr_int64_t directories;
+ apr_int64_t files;
+ apr_int64_t locks;
+ svn_client_ctx_t *ctx;
+};
+
+/* This implements the svn_client_list_func2_t API, printing a single
+ directory entry in text format. */
+static svn_error_t *
+print_dirent(void *baton,
+ const char *path,
+ const svn_dirent_t *dirent,
+ const svn_lock_t *lock,
+ const char *abs_path,
+ const char *external_parent_url,
+ const char *external_target,
+ apr_pool_t *pool)
+{
+ struct print_baton *pb = baton;
+
+ if (pb->ctx->cancel_func)
+ SVN_ERR(pb->ctx->cancel_func(pb->ctx->cancel_baton));
+
+ if (dirent->kind == svn_node_dir)
+ pb->directories++;
+ if (dirent->kind == svn_node_file)
+ pb->files++;
+ if (lock)
+ pb->locks++;
+
+ return SVN_NO_ERROR;
+}
+
+
+/* This implements the `svn_opt_subcommand_t' interface. */
+svn_error_t *
+svn_cl__null_list(apr_getopt_t *os,
+ void *baton,
+ apr_pool_t *pool)
+{
+ svn_cl__opt_state_t *opt_state = ((svn_cl__cmd_baton_t *) baton)->opt_state;
+ svn_client_ctx_t *ctx = ((svn_cl__cmd_baton_t *) baton)->ctx;
+ apr_array_header_t *targets;
+ int i;
+ apr_pool_t *subpool = svn_pool_create(pool);
+ apr_uint32_t dirent_fields;
+ struct print_baton pb = { FALSE };
+ svn_boolean_t seen_nonexistent_target = FALSE;
+ svn_error_t *err;
+
+ SVN_ERR(svn_cl__args_to_target_array_print_reserved(&targets, os,
+ opt_state->targets,
+ ctx, FALSE, pool));
+
+ /* Add "." if user passed 0 arguments */
+ svn_opt_push_implicit_dot_target(targets, pool);
+
+ if (opt_state->verbose)
+ dirent_fields = SVN_DIRENT_ALL;
+ else
+ dirent_fields = SVN_DIRENT_KIND; /* the only thing we actually need... */
+
+ pb.ctx = ctx;
+ pb.verbose = opt_state->verbose;
+
+ if (opt_state->depth == svn_depth_unknown)
+ opt_state->depth = svn_depth_immediates;
+
+ /* For each target, try to list it. */
+ for (i = 0; i < targets->nelts; i++)
+ {
+ const char *target = APR_ARRAY_IDX(targets, i, const char *);
+ const char *truepath;
+ svn_opt_revision_t peg_revision;
+
+ svn_pool_clear(subpool);
+
+ SVN_ERR(svn_cl__check_cancel(ctx->cancel_baton));
+
+ /* Get peg revisions. */
+ SVN_ERR(svn_opt_parse_path(&peg_revision, &truepath, target,
+ subpool));
+
+ err = svn_client_list3(truepath, &peg_revision,
+ &(opt_state->start_revision),
+ opt_state->depth,
+ dirent_fields,
+ opt_state->verbose,
+ FALSE, /* include externals */
+ print_dirent,
+ &pb, ctx, subpool);
+
+ if (err)
+ {
+ /* If one of the targets is a non-existent URL or wc-entry,
+ don't bail out. Just warn and move on to the next target. */
+ if (err->apr_err == SVN_ERR_WC_PATH_NOT_FOUND ||
+ err->apr_err == SVN_ERR_FS_NOT_FOUND)
+ svn_handle_warning2(stderr, err, "svn-bench: ");
+ else
+ return svn_error_trace(err);
+
+ svn_error_clear(err);
+ err = NULL;
+ seen_nonexistent_target = TRUE;
+ }
+ else if (!opt_state->quiet)
+ SVN_ERR(svn_cmdline_printf(pool,
+ _("%15s directories\n"
+ "%15s files\n"
+ "%15s locks\n"),
+ svn__ui64toa_sep(pb.directories, ',', pool),
+ svn__ui64toa_sep(pb.files, ',', pool),
+ svn__ui64toa_sep(pb.locks, ',', pool)));
+ }
+
+ svn_pool_destroy(subpool);
+
+ if (seen_nonexistent_target)
+ return svn_error_create(
+ SVN_ERR_ILLEGAL_TARGET, NULL,
+ _("Could not list all targets because some targets don't exist"));
+ else
+ return SVN_NO_ERROR;
+}
diff --git a/tools/client-side/svn-bench/null-log-cmd.c b/tools/client-side/svn-bench/null-log-cmd.c
new file mode 100644
index 0000000..b35c8f2
--- /dev/null
+++ b/tools/client-side/svn-bench/null-log-cmd.c
@@ -0,0 +1,243 @@
+/*
+ * log-cmd.c -- Display log messages
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#define APR_WANT_STRFUNC
+#define APR_WANT_STDIO
+#include <apr_want.h>
+
+#include "svn_cmdline.h"
+#include "svn_compat.h"
+#include "svn_path.h"
+#include "svn_props.h"
+
+#include "cl.h"
+
+#include "svn_private_config.h"
+#include "private/svn_string_private.h"
+
+
+/*** Code. ***/
+
+/* Baton for log_entry_receiver() and log_entry_receiver_xml(). */
+struct log_receiver_baton
+{
+ /* Client context. */
+ svn_client_ctx_t *ctx;
+
+ /* Level of merge revision nesting */
+ apr_size_t merge_depth;
+
+ /* collect counters? */
+ svn_boolean_t quiet;
+
+ /* total revision counters */
+ apr_int64_t revisions;
+ apr_int64_t changes;
+ apr_int64_t message_lines;
+
+ /* part that came from merges */
+ apr_int64_t merges;
+ apr_int64_t merged_revs;
+ apr_int64_t merged_changes;
+ apr_int64_t merged_message_lines;
+};
+
+
+/* Implement `svn_log_entry_receiver_t', printing the logs in
+ * a human-readable and machine-parseable format.
+ *
+ * BATON is of type `struct log_receiver_baton'.
+ */
+static svn_error_t *
+log_entry_receiver(void *baton,
+ svn_log_entry_t *log_entry,
+ apr_pool_t *pool)
+{
+ struct log_receiver_baton *lb = baton;
+ const char *author;
+ const char *date;
+ const char *message;
+
+ if (lb->ctx->cancel_func)
+ SVN_ERR(lb->ctx->cancel_func(lb->ctx->cancel_baton));
+
+ if (! SVN_IS_VALID_REVNUM(log_entry->revision))
+ {
+ lb->merge_depth--;
+ return SVN_NO_ERROR;
+ }
+
+ /* if we don't want counters, we are done */
+ if (lb->quiet)
+ return SVN_NO_ERROR;
+
+ /* extract the message and do all the other counting */
+ svn_compat_log_revprops_out(&author, &date, &message, log_entry->revprops);
+ if (log_entry->revision == 0 && message == NULL)
+ return SVN_NO_ERROR;
+
+ lb->revisions++;
+ if (lb->merge_depth)
+ lb->merged_revs++;
+
+ if (message != NULL)
+ {
+ int count = svn_cstring_count_newlines(message) + 1;
+ lb->message_lines += count;
+ if (lb->merge_depth)
+ lb->merged_message_lines += count;
+ }
+
+ if (log_entry->changed_paths2)
+ {
+ unsigned count = apr_hash_count(log_entry->changed_paths2);
+ lb->changes += count;
+ if (lb->merge_depth)
+ lb->merged_changes += count;
+ }
+
+ if (log_entry->has_children)
+ {
+ lb->merge_depth++;
+ lb->merges++;
+ }
+
+ return SVN_NO_ERROR;
+}
+
+/* This implements the `svn_opt_subcommand_t' interface. */
+svn_error_t *
+svn_cl__null_log(apr_getopt_t *os,
+ void *baton,
+ apr_pool_t *pool)
+{
+ svn_cl__opt_state_t *opt_state = ((svn_cl__cmd_baton_t *) baton)->opt_state;
+ svn_client_ctx_t *ctx = ((svn_cl__cmd_baton_t *) baton)->ctx;
+ apr_array_header_t *targets;
+ struct log_receiver_baton lb = { 0 };
+ const char *target;
+ int i;
+ apr_array_header_t *revprops;
+ svn_opt_revision_t target_peg_revision;
+ const char *target_path_or_url;
+
+ SVN_ERR(svn_cl__args_to_target_array_print_reserved(&targets, os,
+ opt_state->targets,
+ ctx, FALSE, pool));
+
+ /* Add "." if user passed 0 arguments */
+ svn_opt_push_implicit_dot_target(targets, pool);
+
+ /* Determine if they really want a two-revision range. */
+ if (opt_state->used_change_arg)
+ {
+ if (opt_state->used_revision_arg && opt_state->revision_ranges->nelts > 1)
+ {
+ return svn_error_create
+ (SVN_ERR_CLIENT_BAD_REVISION, NULL,
+ _("-c and -r are mutually exclusive"));
+ }
+ for (i = 0; i < opt_state->revision_ranges->nelts; i++)
+ {
+ svn_opt_revision_range_t *range;
+ range = APR_ARRAY_IDX(opt_state->revision_ranges, i,
+ svn_opt_revision_range_t *);
+ if (range->start.value.number < range->end.value.number)
+ range->start.value.number++;
+ else
+ range->end.value.number++;
+ }
+ }
+
+ /* Parse the first target into path-or-url and peg revision. */
+ target = APR_ARRAY_IDX(targets, 0, const char *);
+ SVN_ERR(svn_opt_parse_path(&target_peg_revision, &target_path_or_url,
+ target, pool));
+ if (target_peg_revision.kind == svn_opt_revision_unspecified)
+ target_peg_revision.kind = (svn_path_is_url(target)
+ ? svn_opt_revision_head
+ : svn_opt_revision_working);
+ APR_ARRAY_IDX(targets, 0, const char *) = target_path_or_url;
+
+ if (svn_path_is_url(target))
+ {
+ for (i = 1; i < targets->nelts; i++)
+ {
+ target = APR_ARRAY_IDX(targets, i, const char *);
+
+ if (svn_path_is_url(target) || target[0] == '/')
+ return svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ _("Only relative paths can be specified"
+ " after a URL for 'svn-bench log', "
+ "but '%s' is not a relative path"),
+ target);
+ }
+ }
+
+ lb.ctx = ctx;
+ lb.quiet = opt_state->quiet;
+
+ revprops = apr_array_make(pool, 3, sizeof(char *));
+ APR_ARRAY_PUSH(revprops, const char *) = SVN_PROP_REVISION_AUTHOR;
+ APR_ARRAY_PUSH(revprops, const char *) = SVN_PROP_REVISION_DATE;
+ if (!opt_state->quiet)
+ APR_ARRAY_PUSH(revprops, const char *) = SVN_PROP_REVISION_LOG;
+ SVN_ERR(svn_client_log5(targets,
+ &target_peg_revision,
+ opt_state->revision_ranges,
+ opt_state->limit,
+ opt_state->verbose,
+ opt_state->stop_on_copy,
+ opt_state->use_merge_history,
+ revprops,
+ log_entry_receiver,
+ &lb,
+ ctx,
+ pool));
+
+ if (!opt_state->quiet)
+ {
+ if (opt_state->use_merge_history)
+ SVN_ERR(svn_cmdline_printf(pool,
+ _("%15s revisions, %15s merged in %s merges\n"
+ "%15s msg lines, %15s in merged revisions\n"
+ "%15s changes, %15s in merged revisions\n"),
+ svn__ui64toa_sep(lb.revisions, ',', pool),
+ svn__ui64toa_sep(lb.merged_revs, ',', pool),
+ svn__ui64toa_sep(lb.merges, ',', pool),
+ svn__ui64toa_sep(lb.message_lines, ',', pool),
+ svn__ui64toa_sep(lb.merged_message_lines, ',', pool),
+ svn__ui64toa_sep(lb.changes, ',', pool),
+ svn__ui64toa_sep(lb.merged_changes, ',', pool)));
+ else
+ SVN_ERR(svn_cmdline_printf(pool,
+ _("%15s revisions\n"
+ "%15s msg lines\n"
+ "%15s changes\n"),
+ svn__ui64toa_sep(lb.revisions, ',', pool),
+ svn__ui64toa_sep(lb.message_lines, ',', pool),
+ svn__ui64toa_sep(lb.changes, ',', pool)));
+ }
+
+ return SVN_NO_ERROR;
+}
diff --git a/tools/client-side/svn-bench/svn-bench.c b/tools/client-side/svn-bench/svn-bench.c
new file mode 100644
index 0000000..bf8964e
--- /dev/null
+++ b/tools/client-side/svn-bench/svn-bench.c
@@ -0,0 +1,954 @@
+/*
+ * main.c: Subversion command line client.
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+/* ==================================================================== */
+
+
+
+/*** Includes. ***/
+
+#include <string.h>
+#include <assert.h>
+
+#include <apr_signal.h>
+
+#include "svn_cmdline.h"
+#include "svn_dirent_uri.h"
+#include "svn_pools.h"
+#include "svn_utf.h"
+#include "svn_version.h"
+
+#include "cl.h"
+
+#include "private/svn_opt_private.h"
+#include "private/svn_cmdline_private.h"
+
+#include "svn_private_config.h"
+
+
+/*** Option Processing ***/
+
+/* Add an identifier here for long options that don't have a short
+ option. Options that have both long and short options should just
+ use the short option letter as identifier. */
+typedef enum svn_cl__longopt_t {
+ opt_auth_password = SVN_OPT_FIRST_LONGOPT_ID,
+ opt_auth_username,
+ opt_config_dir,
+ opt_config_options,
+ opt_depth,
+ opt_no_auth_cache,
+ opt_non_interactive,
+ opt_stop_on_copy,
+ opt_strict,
+ opt_targets,
+ opt_version,
+ opt_with_revprop,
+ opt_with_all_revprops,
+ opt_with_no_revprops,
+ opt_trust_server_cert
+} svn_cl__longopt_t;
+
+
+/* Option codes and descriptions for the command line client.
+ *
+ * The entire list must be terminated with an entry of nulls.
+ */
+const apr_getopt_option_t svn_cl__options[] =
+{
+ {"help", 'h', 0, N_("show help on a subcommand")},
+ {NULL, '?', 0, N_("show help on a subcommand")},
+ {"quiet", 'q', 0, N_("print nothing, or only summary information")},
+ {"recursive", 'R', 0, N_("descend recursively, same as --depth=infinity")},
+ {"non-recursive", 'N', 0, N_("obsolete; try --depth=files or --depth=immediates")},
+ {"change", 'c', 1,
+ N_("the change made by revision ARG (like -r ARG-1:ARG)\n"
+ " "
+ "If ARG is negative this is like -r ARG:ARG-1\n"
+ " "
+ "If ARG is of the form ARG1-ARG2 then this is like\n"
+ " "
+ "ARG1:ARG2, where ARG1 is inclusive")},
+ {"revision", 'r', 1,
+ N_("ARG (some commands also take ARG1:ARG2 range)\n"
+ " "
+ "A revision argument can be one of:\n"
+ " "
+ " NUMBER revision number\n"
+ " "
+ " '{' DATE '}' revision at start of the date\n"
+ " "
+ " 'HEAD' latest in repository\n"
+ " "
+ " 'BASE' base rev of item's working copy\n"
+ " "
+ " 'COMMITTED' last commit at or before BASE\n"
+ " "
+ " 'PREV' revision just before COMMITTED")},
+ {"version", opt_version, 0, N_("show program version information")},
+ {"verbose", 'v', 0, N_("print extra information")},
+ {"username", opt_auth_username, 1, N_("specify a username ARG")},
+ {"password", opt_auth_password, 1, N_("specify a password ARG")},
+ {"targets", opt_targets, 1,
+ N_("pass contents of file ARG as additional args")},
+ {"depth", opt_depth, 1,
+ N_("limit operation by depth ARG ('empty', 'files',\n"
+ " "
+ "'immediates', or 'infinity')")},
+ {"strict", opt_strict, 0, N_("use strict semantics")},
+ {"stop-on-copy", opt_stop_on_copy, 0,
+ N_("do not cross copies while traversing history")},
+ {"no-auth-cache", opt_no_auth_cache, 0,
+ N_("do not cache authentication tokens")},
+ {"trust-server-cert", opt_trust_server_cert, 0,
+ N_("accept SSL server certificates from unknown\n"
+ " "
+ "certificate authorities without prompting (but only\n"
+ " "
+ "with '--non-interactive')") },
+ {"non-interactive", opt_non_interactive, 0,
+ N_("do no interactive prompting")},
+ {"config-dir", opt_config_dir, 1,
+ N_("read user configuration files from directory ARG")},
+ {"config-option", opt_config_options, 1,
+ N_("set user configuration option in the format:\n"
+ " "
+ " FILE:SECTION:OPTION=[VALUE]\n"
+ " "
+ "For example:\n"
+ " "
+ " servers:global:http-library=serf")},
+ {"limit", 'l', 1, N_("maximum number of log entries")},
+ {"with-all-revprops", opt_with_all_revprops, 0,
+ N_("retrieve all revision properties")},
+ {"with-no-revprops", opt_with_no_revprops, 0,
+ N_("retrieve no revision properties")},
+ {"with-revprop", opt_with_revprop, 1,
+ N_("set revision property ARG in new revision\n"
+ " "
+ "using the name[=value] format")},
+ {"use-merge-history", 'g', 0,
+ N_("use/display additional information from merge\n"
+ " "
+ "history")},
+
+ /* Long-opt Aliases
+ *
+   * These have NULL descriptions, but an option code that matches some
+ * other option (whose description should probably mention its aliases).
+ */
+
+ {0, 0, 0, 0},
+};
+
+
+
+/*** Command dispatch. ***/
+
+/* Our array of available subcommands.
+ *
+ * The entire list must be terminated with an entry of nulls.
+ *
+ * In most of the help text "PATH" is used where a working copy path is
+ * required, "URL" where a repository URL is required and "TARGET" when
+ * either a path or a url can be used. Hmm, should this be part of the
+ * help text?
+ */
+
+/* Options that apply to all commands. (While not every command may
+ currently require authentication or be interactive, allowing every
+ command to take these arguments allows scripts to just pass them
+ willy-nilly to every invocation of 'svn') . */
+const int svn_cl__global_options[] =
+{ opt_auth_username, opt_auth_password, opt_no_auth_cache, opt_non_interactive,
+ opt_trust_server_cert, opt_config_dir, opt_config_options, 0
+};
+
+const svn_opt_subcommand_desc2_t svn_cl__cmd_table[] =
+{
+ { "help", svn_cl__help, {"?", "h"}, N_
+ ("Describe the usage of this program or its subcommands.\n"
+ "usage: help [SUBCOMMAND...]\n"),
+ {0} },
+ /* This command is also invoked if we see option "--help", "-h" or "-?". */
+
+ { "null-export", svn_cl__null_export, {0}, N_
+ ("Create an unversioned copy of a tree.\n"
+ "usage: null-export [-r REV] URL[@PEGREV]\n"
+ "\n"
+ " Exports a clean directory tree from the repository specified by\n"
+ " URL, at revision REV if it is given, otherwise at HEAD.\n"
+ "\n"
+ " If specified, PEGREV determines in which revision the target is first\n"
+ " looked up.\n"),
+ {'r', 'q', 'N', opt_depth} },
+
+ { "null-list", svn_cl__null_list, {"ls"}, N_
+ ("List directory entries in the repository.\n"
+ "usage: list [TARGET[@REV]...]\n"
+ "\n"
+ " List each TARGET file and the contents of each TARGET directory as\n"
+ " they exist in the repository. If TARGET is a working copy path, the\n"
+ " corresponding repository URL will be used. If specified, REV determines\n"
+ " in which revision the target is first looked up.\n"
+ "\n"
+ " The default TARGET is '.', meaning the repository URL of the current\n"
+ " working directory.\n"
+ "\n"
+ " With --verbose, the following fields will be fetched for each item:\n"
+ "\n"
+ " Revision number of the last commit\n"
+ " Author of the last commit\n"
+ " If locked, the letter 'O'. (Use 'svn info URL' to see details)\n"
+ " Size (in bytes)\n"
+ " Date and time of the last commit\n"),
+ {'r', 'v', 'q', 'R', opt_depth} },
+
+ { "null-log", svn_cl__null_log, {0}, N_
+ ("Fetch the log messages for a set of revision(s) and/or path(s).\n"
+ "usage: 1. null-log [PATH][@REV]\n"
+ " 2. null-log URL[@REV] [PATH...]\n"
+ "\n"
+ " 1. Fetch the log messages for the URL corresponding to PATH\n"
+ " (default: '.'). If specified, REV is the revision in which the\n"
+ " URL is first looked up, and the default revision range is REV:1.\n"
+ " If REV is not specified, the default revision range is BASE:1,\n"
+ " since the URL might not exist in the HEAD revision.\n"
+ "\n"
+ " 2. Fetch the log messages for the PATHs (default: '.') under URL.\n"
+ " If specified, REV is the revision in which the URL is first\n"
+ " looked up, and the default revision range is REV:1; otherwise,\n"
+ " the URL is looked up in HEAD, and the default revision range is\n"
+ " HEAD:1.\n"
+ "\n"
+ " Multiple '-c' or '-r' options may be specified (but not a\n"
+ " combination of '-c' and '-r' options), and mixing of forward and\n"
+ " reverse ranges is allowed.\n"
+ "\n"
+ " With -v, also print all affected paths with each log message.\n"
+ " With -q, don't print the log message body itself (note that this is\n"
+ " compatible with -v).\n"
+ "\n"
+ " Each log message is printed just once, even if more than one of the\n"
+ " affected paths for that revision were explicitly requested. Logs\n"
+ " follow copy history by default. Use --stop-on-copy to disable this\n"
+ " behavior, which can be useful for determining branchpoints.\n"),
+ {'r', 'q', 'v', 'g', 'c', opt_targets, opt_stop_on_copy,
+ 'l', opt_with_all_revprops, opt_with_no_revprops, opt_with_revprop,
+ 'x',},
+ {{opt_with_revprop, N_("retrieve revision property ARG")},
+ {'c', N_("the change made in revision ARG")}} },
+
+ { NULL, NULL, {0}, NULL, {0} }
+};
+
+
+/* Version compatibility check */
+static svn_error_t *
+check_lib_versions(void)
+{
+ static const svn_version_checklist_t checklist[] =
+ {
+ { "svn_subr", svn_subr_version },
+ { "svn_client", svn_client_version },
+ { "svn_wc", svn_wc_version },
+ { "svn_ra", svn_ra_version },
+ { "svn_delta", svn_delta_version },
+ { NULL, NULL }
+ };
+ SVN_VERSION_DEFINE(my_version);
+
+ return svn_ver_check_list(&my_version, checklist);
+}
+
+
+/* A flag to see if we've been cancelled by the client or not. */
+static volatile sig_atomic_t cancelled = FALSE;
+
+/* A signal handler to support cancellation. */
+static void
+signal_handler(int signum)
+{
+ apr_signal(signum, SIG_IGN);
+ cancelled = TRUE;
+}
+
+/* Our cancellation callback. */
+svn_error_t *
+svn_cl__check_cancel(void *baton)
+{
+ if (cancelled)
+ return svn_error_create(SVN_ERR_CANCELLED, NULL, _("Caught signal"));
+ else
+ return SVN_NO_ERROR;
+}
+
+
+/*** Main. ***/
+
+/* Report and clear the error ERR, and return EXIT_FAILURE. */
+#define EXIT_ERROR(err) \
+ svn_cmdline_handle_exit_error(err, NULL, "svn: ")
+
+/* A redefinition of the public SVN_INT_ERR macro, that suppresses the
+ * error message if it is SVN_ERR_IO_PIPE_WRITE_ERROR. */
+#undef SVN_INT_ERR
+#define SVN_INT_ERR(expr) \
+ do { \
+ svn_error_t *svn_err__temp = (expr); \
+ if (svn_err__temp) \
+ return EXIT_ERROR(svn_err__temp); \
+ } while (0)
+
+static int
+sub_main(int argc, const char *argv[], apr_pool_t *pool)
+{
+ svn_error_t *err;
+ int opt_id;
+ apr_getopt_t *os;
+ svn_cl__opt_state_t opt_state = { 0, { 0 } };
+ svn_client_ctx_t *ctx;
+ apr_array_header_t *received_opts;
+ int i;
+ const svn_opt_subcommand_desc2_t *subcommand = NULL;
+ svn_cl__cmd_baton_t command_baton;
+ svn_auth_baton_t *ab;
+ svn_config_t *cfg_config;
+ svn_boolean_t descend = TRUE;
+ svn_boolean_t use_notifier = TRUE;
+
+ received_opts = apr_array_make(pool, SVN_OPT_MAX_OPTIONS, sizeof(int));
+
+ /* Check library versions */
+ SVN_INT_ERR(check_lib_versions());
+
+#if defined(WIN32) || defined(__CYGWIN__)
+ /* Set the working copy administrative directory name. */
+ if (getenv("SVN_ASP_DOT_NET_HACK"))
+ {
+ SVN_INT_ERR(svn_wc_set_adm_dir("_svn", pool));
+ }
+#endif
+
+ /* Initialize the RA library. */
+ SVN_INT_ERR(svn_ra_initialize(pool));
+
+ /* Begin processing arguments. */
+ opt_state.start_revision.kind = svn_opt_revision_unspecified;
+ opt_state.end_revision.kind = svn_opt_revision_unspecified;
+ opt_state.revision_ranges =
+ apr_array_make(pool, 0, sizeof(svn_opt_revision_range_t *));
+ opt_state.depth = svn_depth_unknown;
+
+ /* No args? Show usage. */
+ if (argc <= 1)
+ {
+ SVN_INT_ERR(svn_cl__help(NULL, NULL, pool));
+ return EXIT_FAILURE;
+ }
+
+ /* Else, parse options. */
+ SVN_INT_ERR(svn_cmdline__getopt_init(&os, argc, argv, pool));
+
+ os->interleave = 1;
+ while (1)
+ {
+ const char *opt_arg;
+ const char *utf8_opt_arg;
+
+ /* Parse the next option. */
+ apr_status_t apr_err = apr_getopt_long(os, svn_cl__options, &opt_id,
+ &opt_arg);
+ if (APR_STATUS_IS_EOF(apr_err))
+ break;
+ else if (apr_err)
+ {
+ SVN_INT_ERR(svn_cl__help(NULL, NULL, pool));
+ return EXIT_FAILURE;
+ }
+
+ /* Stash the option code in an array before parsing it. */
+ APR_ARRAY_PUSH(received_opts, int) = opt_id;
+
+ switch (opt_id) {
+ case 'l':
+ {
+ err = svn_cstring_atoi(&opt_state.limit, opt_arg);
+ if (err)
+ {
+ err = svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, err,
+ _("Non-numeric limit argument given"));
+ return EXIT_ERROR(err);
+ }
+ if (opt_state.limit <= 0)
+ {
+ err = svn_error_create(SVN_ERR_INCORRECT_PARAMS, NULL,
+ _("Argument to --limit must be positive"));
+ return EXIT_ERROR(err);
+ }
+ }
+ break;
+ case 'c':
+ {
+ apr_array_header_t *change_revs =
+ svn_cstring_split(opt_arg, ", \n\r\t\v", TRUE, pool);
+
+ for (i = 0; i < change_revs->nelts; i++)
+ {
+ char *end;
+ svn_revnum_t changeno, changeno_end;
+ const char *change_str =
+ APR_ARRAY_IDX(change_revs, i, const char *);
+ const char *s = change_str;
+ svn_boolean_t is_negative;
+
+ /* Check for a leading minus to allow "-c -r42".
+ * The is_negative flag is used to handle "-c -42" and "-c -r42".
+ * The "-c r-42" case is handled by strtol() returning a
+ * negative number. */
+ is_negative = (*s == '-');
+ if (is_negative)
+ s++;
+
+ /* Allow any number of 'r's to prefix a revision number. */
+ while (*s == 'r')
+ s++;
+ changeno = changeno_end = strtol(s, &end, 10);
+ if (end != s && *end == '-')
+ {
+ if (changeno < 0 || is_negative)
+ {
+ err = svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR,
+ NULL,
+ _("Negative number in range (%s)"
+ " not supported with -c"),
+ change_str);
+ return EXIT_ERROR(err);
+ }
+ s = end + 1;
+ while (*s == 'r')
+ s++;
+ changeno_end = strtol(s, &end, 10);
+ }
+ if (end == change_str || *end != '\0')
+ {
+ err = svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ _("Non-numeric change argument (%s) "
+ "given to -c"), change_str);
+ return EXIT_ERROR(err);
+ }
+
+ if (changeno == 0)
+ {
+ err = svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ _("There is no change 0"));
+ return EXIT_ERROR(err);
+ }
+
+ if (is_negative)
+ changeno = -changeno;
+
+ /* Figure out the range:
+ -c N -> -r N-1:N
+ -c -N -> -r N:N-1
+ -c M-N -> -r M-1:N for M < N
+ -c M-N -> -r M:N-1 for M > N
+ -c -M-N -> error (too confusing/no valid use case)
+ */
+ if (changeno > 0)
+ {
+ if (changeno <= changeno_end)
+ changeno--;
+ else
+ changeno_end--;
+ }
+ else
+ {
+ changeno = -changeno;
+ changeno_end = changeno - 1;
+ }
+
+ opt_state.used_change_arg = TRUE;
+ APR_ARRAY_PUSH(opt_state.revision_ranges,
+ svn_opt_revision_range_t *)
+ = svn_opt__revision_range_from_revnums(changeno, changeno_end,
+ pool);
+ }
+ }
+ break;
+ case 'r':
+ opt_state.used_revision_arg = TRUE;
+ if (svn_opt_parse_revision_to_range(opt_state.revision_ranges,
+ opt_arg, pool) != 0)
+ {
+ SVN_INT_ERR(svn_utf_cstring_to_utf8(&utf8_opt_arg, opt_arg, pool));
+ err = svn_error_createf
+ (SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ _("Syntax error in revision argument '%s'"),
+ utf8_opt_arg);
+ return EXIT_ERROR(err);
+ }
+ break;
+ case 'v':
+ opt_state.verbose = TRUE;
+ break;
+ case 'h':
+ case '?':
+ opt_state.help = TRUE;
+ break;
+ case 'q':
+ opt_state.quiet = TRUE;
+ break;
+ case opt_targets:
+ {
+ svn_stringbuf_t *buffer, *buffer_utf8;
+
+ /* We need to convert to UTF-8 now, even before we divide
+ the targets into an array, because otherwise we wouldn't
+ know what delimiter to use for svn_cstring_split(). */
+
+ SVN_INT_ERR(svn_utf_cstring_to_utf8(&utf8_opt_arg, opt_arg, pool));
+ SVN_INT_ERR(svn_stringbuf_from_file2(&buffer, utf8_opt_arg, pool));
+ SVN_INT_ERR(svn_utf_stringbuf_to_utf8(&buffer_utf8, buffer, pool));
+ opt_state.targets = svn_cstring_split(buffer_utf8->data, "\n\r",
+ TRUE, pool);
+ }
+ break;
+ case 'N':
+ descend = FALSE;
+ break;
+ case opt_depth:
+ err = svn_utf_cstring_to_utf8(&utf8_opt_arg, opt_arg, pool);
+ if (err)
+ return EXIT_ERROR
+ (svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, err,
+ _("Error converting depth "
+ "from locale to UTF-8")));
+ opt_state.depth = svn_depth_from_word(utf8_opt_arg);
+ if (opt_state.depth == svn_depth_unknown
+ || opt_state.depth == svn_depth_exclude)
+ {
+ return EXIT_ERROR
+ (svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ _("'%s' is not a valid depth; try "
+ "'empty', 'files', 'immediates', "
+ "or 'infinity'"),
+ utf8_opt_arg));
+ }
+ break;
+ case opt_version:
+ opt_state.version = TRUE;
+ break;
+ case opt_auth_username:
+ SVN_INT_ERR(svn_utf_cstring_to_utf8(&opt_state.auth_username,
+ opt_arg, pool));
+ break;
+ case opt_auth_password:
+ SVN_INT_ERR(svn_utf_cstring_to_utf8(&opt_state.auth_password,
+ opt_arg, pool));
+ break;
+ case opt_stop_on_copy:
+ opt_state.stop_on_copy = TRUE;
+ break;
+ case opt_strict:
+ opt_state.strict = TRUE;
+ break;
+ case opt_no_auth_cache:
+ opt_state.no_auth_cache = TRUE;
+ break;
+ case opt_non_interactive:
+ opt_state.non_interactive = TRUE;
+ break;
+ case opt_trust_server_cert:
+ opt_state.trust_server_cert = TRUE;
+ break;
+ case 'x':
+ SVN_INT_ERR(svn_utf_cstring_to_utf8(&opt_state.extensions,
+ opt_arg, pool));
+ break;
+ case opt_config_dir:
+ {
+ const char *path_utf8;
+ SVN_INT_ERR(svn_utf_cstring_to_utf8(&path_utf8, opt_arg, pool));
+ opt_state.config_dir = svn_dirent_internal_style(path_utf8, pool);
+ }
+ break;
+ case opt_config_options:
+ if (!opt_state.config_options)
+ opt_state.config_options =
+ apr_array_make(pool, 1,
+ sizeof(svn_cmdline__config_argument_t*));
+
+ SVN_INT_ERR(svn_utf_cstring_to_utf8(&opt_arg, opt_arg, pool));
+ SVN_INT_ERR(svn_cmdline__parse_config_option(opt_state.config_options,
+ opt_arg, pool));
+ break;
+ case opt_with_all_revprops:
+ /* If --with-all-revprops is specified along with one or more
+ * --with-revprops options, --with-all-revprops takes precedence. */
+ opt_state.all_revprops = TRUE;
+ break;
+ case opt_with_no_revprops:
+ opt_state.no_revprops = TRUE;
+ break;
+ case opt_with_revprop:
+ SVN_INT_ERR(svn_opt_parse_revprop(&opt_state.revprop_table,
+ opt_arg, pool));
+ break;
+ case 'g':
+ opt_state.use_merge_history = TRUE;
+ break;
+ default:
+ /* Hmmm. Perhaps this would be a good place to squirrel away
+ opts that commands like svn diff might need. Hmmm indeed. */
+ break;
+ }
+ }
+
+ /* ### This really belongs in libsvn_client. The trouble is,
+ there's no one place there to run it from, no
+ svn_client_init(). We'd have to add it to all the public
+ functions that a client might call. It's unmaintainable to do
+ initialization from within libsvn_client itself, but it seems
+ burdensome to demand that all clients call svn_client_init()
+ before calling any other libsvn_client function... On the other
+ hand, the alternative is effectively to demand that they call
+ svn_config_ensure() instead, so maybe we should have a generic
+ init function anyway. Thoughts? */
+ SVN_INT_ERR(svn_config_ensure(opt_state.config_dir, pool));
+
+ /* If the user asked for help, then the rest of the arguments are
+ the names of subcommands to get help on (if any), or else they're
+ just typos/mistakes. Whatever the case, the subcommand to
+ actually run is svn_cl__help(). */
+ if (opt_state.help)
+ subcommand = svn_opt_get_canonical_subcommand2(svn_cl__cmd_table, "help");
+
+ /* If we're not running the `help' subcommand, then look for a
+ subcommand in the first argument. */
+ if (subcommand == NULL)
+ {
+ if (os->ind >= os->argc)
+ {
+ if (opt_state.version)
+ {
+ /* Use the "help" subcommand to handle the "--version" option. */
+ static const svn_opt_subcommand_desc2_t pseudo_cmd =
+ { "--version", svn_cl__help, {0}, "",
+ {opt_version, /* must accept its own option */
+ 'q', /* brief output */
+ 'v', /* verbose output */
+ opt_config_dir /* all commands accept this */
+ } };
+
+ subcommand = &pseudo_cmd;
+ }
+ else
+ {
+ svn_error_clear
+ (svn_cmdline_fprintf(stderr, pool,
+ _("Subcommand argument required\n")));
+ SVN_INT_ERR(svn_cl__help(NULL, NULL, pool));
+ return EXIT_FAILURE;
+ }
+ }
+ else
+ {
+ const char *first_arg = os->argv[os->ind++];
+ subcommand = svn_opt_get_canonical_subcommand2(svn_cl__cmd_table,
+ first_arg);
+ if (subcommand == NULL)
+ {
+ const char *first_arg_utf8;
+ SVN_INT_ERR(svn_utf_cstring_to_utf8(&first_arg_utf8,
+ first_arg, pool));
+ svn_error_clear
+ (svn_cmdline_fprintf(stderr, pool,
+ _("Unknown subcommand: '%s'\n"),
+ first_arg_utf8));
+ SVN_INT_ERR(svn_cl__help(NULL, NULL, pool));
+ return EXIT_FAILURE;
+ }
+ }
+ }
+
+ /* Check that the subcommand wasn't passed any inappropriate options. */
+ for (i = 0; i < received_opts->nelts; i++)
+ {
+ opt_id = APR_ARRAY_IDX(received_opts, i, int);
+
+ /* All commands implicitly accept --help, so just skip over this
+ when we see it. Note that we don't want to include this option
+ in their "accepted options" list because it would be awfully
+ redundant to display it in every commands' help text. */
+ if (opt_id == 'h' || opt_id == '?')
+ continue;
+
+ if (! svn_opt_subcommand_takes_option3(subcommand, opt_id,
+ svn_cl__global_options))
+ {
+ const char *optstr;
+ const apr_getopt_option_t *badopt =
+ svn_opt_get_option_from_code2(opt_id, svn_cl__options,
+ subcommand, pool);
+ svn_opt_format_option(&optstr, badopt, FALSE, pool);
+ if (subcommand->name[0] == '-')
+ SVN_INT_ERR(svn_cl__help(NULL, NULL, pool));
+ else
+ svn_error_clear
+ (svn_cmdline_fprintf
+ (stderr, pool, _("Subcommand '%s' doesn't accept option '%s'\n"
+ "Type 'svn-bench help %s' for usage.\n"),
+ subcommand->name, optstr, subcommand->name));
+ return EXIT_FAILURE;
+ }
+ }
+
+ /* Only merge and log support multiple revisions/revision ranges. */
+ if (subcommand->cmd_func != svn_cl__null_log)
+ {
+ if (opt_state.revision_ranges->nelts > 1)
+ {
+ err = svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ _("Multiple revision arguments "
+ "encountered; can't specify -c twice, "
+ "or both -c and -r"));
+ return EXIT_ERROR(err);
+ }
+ }
+
+ /* Disallow simultaneous use of both --with-all-revprops and
+ --with-no-revprops. */
+ if (opt_state.all_revprops && opt_state.no_revprops)
+ {
+ err = svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ _("--with-all-revprops and --with-no-revprops "
+ "are mutually exclusive"));
+ return EXIT_ERROR(err);
+ }
+
+ /* Disallow simultaneous use of both --with-revprop and
+ --with-no-revprops. */
+ if (opt_state.revprop_table && opt_state.no_revprops)
+ {
+ err = svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ _("--with-revprop and --with-no-revprops "
+ "are mutually exclusive"));
+ return EXIT_ERROR(err);
+ }
+
+ /* --trust-server-cert can only be used with --non-interactive */
+ if (opt_state.trust_server_cert && !opt_state.non_interactive)
+ {
+ err = svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ _("--trust-server-cert requires "
+ "--non-interactive"));
+ return EXIT_ERROR(err);
+ }
+
+ /* Ensure that 'revision_ranges' has at least one item, and make
+ 'start_revision' and 'end_revision' match that item. */
+ if (opt_state.revision_ranges->nelts == 0)
+ {
+ svn_opt_revision_range_t *range = apr_palloc(pool, sizeof(*range));
+ range->start.kind = svn_opt_revision_unspecified;
+ range->end.kind = svn_opt_revision_unspecified;
+ APR_ARRAY_PUSH(opt_state.revision_ranges,
+ svn_opt_revision_range_t *) = range;
+ }
+ opt_state.start_revision = APR_ARRAY_IDX(opt_state.revision_ranges, 0,
+ svn_opt_revision_range_t *)->start;
+ opt_state.end_revision = APR_ARRAY_IDX(opt_state.revision_ranges, 0,
+ svn_opt_revision_range_t *)->end;
+
+ /* Create a client context object. */
+ command_baton.opt_state = &opt_state;
+ SVN_INT_ERR(svn_client_create_context2(&ctx, NULL, pool));
+ command_baton.ctx = ctx;
+
+ /* Only a few commands can accept a revision range; the rest can take at
+ most one revision number. */
+ if (subcommand->cmd_func != svn_cl__null_log)
+ {
+ if (opt_state.end_revision.kind != svn_opt_revision_unspecified)
+ {
+ err = svn_error_create(SVN_ERR_CLIENT_REVISION_RANGE, NULL, NULL);
+ return EXIT_ERROR(err);
+ }
+ }
+
+ /* -N has a different meaning depending on the command */
+ if (!descend)
+ opt_state.depth = svn_depth_files;
+
+ err = svn_config_get_config(&(ctx->config),
+ opt_state.config_dir, pool);
+ if (err)
+ {
+ /* Fallback to default config if the config directory isn't readable
+ or is not a directory. */
+ if (APR_STATUS_IS_EACCES(err->apr_err)
+ || SVN__APR_STATUS_IS_ENOTDIR(err->apr_err))
+ {
+ svn_handle_warning2(stderr, err, "svn: ");
+ svn_error_clear(err);
+ }
+ else
+ return EXIT_ERROR(err);
+ }
+
+ cfg_config = apr_hash_get(ctx->config, SVN_CONFIG_CATEGORY_CONFIG,
+ APR_HASH_KEY_STRING);
+
+ /* Update the options in the config */
+ if (opt_state.config_options)
+ {
+ svn_error_clear(
+ svn_cmdline__apply_config_options(ctx->config,
+ opt_state.config_options,
+ "svn: ", "--config-option"));
+ }
+
+ /* Set up the notifier.
+
+ In general, we use it any time we aren't in --quiet mode. 'svn
+ status' is unique, though, in that we don't want it in --quiet mode
+ unless we're also in --verbose mode. When in --xml mode,
+ though, we never want it. */
+ if (opt_state.quiet)
+ use_notifier = FALSE;
+ if (use_notifier)
+ {
+ SVN_INT_ERR(svn_cl__get_notifier(&ctx->notify_func2, &ctx->notify_baton2,
+ pool));
+ }
+
+ /* Set up our cancellation support. */
+ ctx->cancel_func = svn_cl__check_cancel;
+ apr_signal(SIGINT, signal_handler);
+#ifdef SIGBREAK
+ /* SIGBREAK is a Win32 specific signal generated by ctrl-break. */
+ apr_signal(SIGBREAK, signal_handler);
+#endif
+#ifdef SIGHUP
+ apr_signal(SIGHUP, signal_handler);
+#endif
+#ifdef SIGTERM
+ apr_signal(SIGTERM, signal_handler);
+#endif
+
+#ifdef SIGPIPE
+ /* Disable SIGPIPE generation for the platforms that have it. */
+ apr_signal(SIGPIPE, SIG_IGN);
+#endif
+
+#ifdef SIGXFSZ
+ /* Disable SIGXFSZ generation for the platforms that have it, otherwise
+ * working with large files when compiled against an APR that doesn't have
+ * large file support will crash the program, which is uncool. */
+ apr_signal(SIGXFSZ, SIG_IGN);
+#endif
+
+ /* Set up Authentication stuff. */
+ SVN_INT_ERR(svn_cmdline_create_auth_baton(&ab,
+ opt_state.non_interactive,
+ opt_state.auth_username,
+ opt_state.auth_password,
+ opt_state.config_dir,
+ opt_state.no_auth_cache,
+ opt_state.trust_server_cert,
+ cfg_config,
+ ctx->cancel_func,
+ ctx->cancel_baton,
+ pool));
+
+ ctx->auth_baton = ab;
+
+ /* The new svn behavior is to postpone everything until after the operation
+ completed */
+ ctx->conflict_func = NULL;
+ ctx->conflict_baton = NULL;
+ ctx->conflict_func2 = NULL;
+ ctx->conflict_baton2 = NULL;
+
+ /* And now we finally run the subcommand. */
+ err = (*subcommand->cmd_func)(os, &command_baton, pool);
+ if (err)
+ {
+ /* For argument-related problems, suggest using the 'help'
+ subcommand. */
+ if (err->apr_err == SVN_ERR_CL_INSUFFICIENT_ARGS
+ || err->apr_err == SVN_ERR_CL_ARG_PARSING_ERROR)
+ {
+ err = svn_error_quick_wrap(
+ err, apr_psprintf(pool,
+ _("Try 'svn-bench help %s' for more information"),
+ subcommand->name));
+ }
+ if (err->apr_err == SVN_ERR_WC_UPGRADE_REQUIRED)
+ {
+ err = svn_error_quick_wrap(err,
+ _("Please see the 'svn upgrade' command"));
+ }
+
+ /* Tell the user about 'svn cleanup' if any error on the stack
+ was about locked working copies. */
+ if (svn_error_find_cause(err, SVN_ERR_WC_LOCKED))
+ {
+ err = svn_error_quick_wrap(
+ err, _("Run 'svn cleanup' to remove locks "
+ "(type 'svn help cleanup' for details)"));
+ }
+
+ return EXIT_ERROR(err);
+ }
+ else
+ {
+ /* Ensure that stdout is flushed, so the user will see any write errors.
+ This makes sure that output is not silently lost. */
+ SVN_INT_ERR(svn_cmdline_fflush(stdout));
+
+ return EXIT_SUCCESS;
+ }
+}
+
+int
+main(int argc, const char *argv[])
+{
+ apr_pool_t *pool;
+ int exit_code;
+
+ /* Initialize the app. */
+ if (svn_cmdline_init("svn", stderr) != EXIT_SUCCESS)
+ return EXIT_FAILURE;
+
+ /* Create our top-level pool. Use a separate mutexless allocator,
+ * given this application is single threaded.
+ */
+ pool = apr_allocator_owner_get(svn_pool_create_allocator(FALSE));
+
+ exit_code = sub_main(argc, argv, pool);
+
+ svn_pool_destroy(pool);
+ return exit_code;
+}
diff --git a/tools/client-side/svn-bench/util.c b/tools/client-side/svn-bench/util.c
new file mode 100644
index 0000000..2aedde6
--- /dev/null
+++ b/tools/client-side/svn-bench/util.c
@@ -0,0 +1,92 @@
+/*
+ * util.c: Subversion command line client utility functions. Any
+ * functions that need to be shared across subcommands should be put
+ * in here.
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+/* ==================================================================== */
+
+
+
+/*** Includes. ***/
+
+#include <string.h>
+#include <ctype.h>
+#include <assert.h>
+
+#include "svn_private_config.h"
+#include "svn_error.h"
+#include "svn_path.h"
+
+#include "cl.h"
+
+
+
+svn_error_t *
+svn_cl__args_to_target_array_print_reserved(apr_array_header_t **targets,
+ apr_getopt_t *os,
+ const apr_array_header_t *known_targets,
+ svn_client_ctx_t *ctx,
+ svn_boolean_t keep_last_origpath_on_truepath_collision,
+ apr_pool_t *pool)
+{
+ svn_error_t *err = svn_client_args_to_target_array2(targets,
+ os,
+ known_targets,
+ ctx,
+ keep_last_origpath_on_truepath_collision,
+ pool);
+ if (err)
+ {
+ if (err->apr_err == SVN_ERR_RESERVED_FILENAME_SPECIFIED)
+ {
+ svn_handle_error2(err, stderr, FALSE, "svn: Skipping argument: ");
+ svn_error_clear(err);
+ }
+ else
+ return svn_error_trace(err);
+ }
+ return SVN_NO_ERROR;
+}
+
+svn_error_t *
+svn_cl__check_target_is_local_path(const char *target)
+{
+ if (svn_path_is_url(target))
+ return svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ _("'%s' is not a local path"), target);
+ return SVN_NO_ERROR;
+}
+
+const char *
+svn_cl__local_style_skip_ancestor(const char *parent_path,
+ const char *path,
+ apr_pool_t *pool)
+{
+ const char *relpath = NULL;
+
+ if (parent_path)
+ relpath = svn_dirent_skip_ancestor(parent_path, path);
+
+ return svn_dirent_local_style(relpath ? relpath : path, pool);
+}
+
diff --git a/tools/client-side/svn-ssl-fingerprints.sh b/tools/client-side/svn-ssl-fingerprints.sh
index 6d1fd92..6fed58b 100755
--- a/tools/client-side/svn-ssl-fingerprints.sh
+++ b/tools/client-side/svn-ssl-fingerprints.sh
@@ -28,6 +28,6 @@
CONFIG_DIR=${1-$HOME/.subversion}
for i in $CONFIG_DIR/auth/svn.ssl.server/????????????????????????????????; do
grep :// $i
- grep '.\{80\}' $i | sed 's/\(.\{64\}\)/\1\n/g' | openssl base64 -d | openssl x509 -inform der -noout -fingerprint | sed 's/=/\n/'
+ grep '.\{80\}' $i | sed 's/\(.\{64\}\)/\1 /g' | xargs -n1 | openssl base64 -d | openssl x509 -inform der -noout -fingerprint | sed 's/=/ /' | xargs -n1
echo
done
diff --git a/tools/client-side/svn-viewspec.py b/tools/client-side/svn-viewspec.py
index 794460a..cdcd495 100755
--- a/tools/client-side/svn-viewspec.py
+++ b/tools/client-side/svn-viewspec.py
@@ -20,6 +20,8 @@
# ====================================================================
"""\
+__SCRIPTNAME__: checkout utility for sparse Subversion working copies
+
Usage: 1. __SCRIPTNAME__ checkout VIEWSPEC-FILE TARGET-DIR
2. __SCRIPTNAME__ examine VIEWSPEC-FILE
3. __SCRIPTNAME__ help
diff --git a/tools/client-side/svnmucc/svnmucc-test.py b/tools/client-side/svnmucc/svnmucc-test.py
deleted file mode 100755
index c09d15c..0000000
--- a/tools/client-side/svnmucc/svnmucc-test.py
+++ /dev/null
@@ -1,359 +0,0 @@
-#!/usr/bin/env python
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-# Usage: svnmucc-test.py [build-dir-top [base-url]]
-
-import sys
-import os
-import re
-import shutil
-
-# calculate the absolute directory in which this test script lives
-this_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
-
-# add the Subversion Python test suite libraries to the path, and import
-sys.path.insert(0, '%s/../../../subversion/tests/cmdline' % (this_dir))
-import svntest
-
-# setup the global 'svntest.main.options' object so functions in the
-# module don't freak out.
-svntest.main._parse_options(arglist=[])
-
-# calculate the top of the build tree
-if len(sys.argv) > 1:
- build_top = os.path.abspath(sys.argv[1])
-else:
- build_top = os.path.abspath('%s/../../../' % (this_dir))
-
-# where lives svnmucc?
-svnmucc_binary = \
- os.path.abspath('%s/tools/client-side/svnmucc/svnmucc' % (build_top))
-
-# override some svntest binary locations
-svntest.main.svn_binary = \
- os.path.abspath('%s/subversion/svn/svn' % (build_top))
-svntest.main.svnlook_binary = \
- os.path.abspath('%s/subversion/svnlook/svnlook' % (build_top))
-svntest.main.svnadmin_binary = \
- os.path.abspath('%s/subversion/svnadmin/svnadmin' % (build_top))
-
-# where lives the test repository?
-repos_path = \
- os.path.abspath(('%s/tools/client-side/svnmucc/svnmucc-test-repos'
- % (build_top)))
-
-if (len(sys.argv) > 2):
- repos_url = sys.argv[2] + '/svnmucc-test-repos'
-else:
- repos_url = 'file://' + repos_path
-
-def die(msg):
- """Write MSG (formatted as a failure) to stderr, and exit with a
- non-zero errorcode."""
-
- sys.stderr.write("FAIL: " + msg + "\n")
- sys.exit(1)
-
-
-_svnmucc_re = re.compile('^(r[0-9]+) committed by svnmuccuser at (.*)$')
-_log_re = re.compile('^ ([ADRM] /[^\(]+($| \(from .*:[0-9]+\)$))')
-_err_re = re.compile('^svnmucc: (.*)$')
-
-def xrun_svnmucc(expected_errors, *varargs):
- """Run svnmucc with the list of SVNMUCC_ARGS arguments. Verify that
- its run results match the list of EXPECTED_ERRORS."""
-
- # First, run svnmucc.
- exit_code, outlines, errlines = \
- svntest.main.run_command(svnmucc_binary, 1, 0,
- '-U', repos_url,
- '-u', 'svnmuccuser',
- '-p', 'svnmuccpass',
- '--config-dir', 'dummy',
- *varargs)
- errors = []
- for line in errlines:
- match = _err_re.match(line)
- if match:
- errors.append(line.rstrip('\n\r'))
- if errors != expected_errors:
- raise svntest.main.SVNUnmatchedError(str(errors))
-
-
-def run_svnmucc(expected_path_changes, *varargs):
- """Run svnmucc with the list of SVNMUCC_ARGS arguments. Verify that
- its run results in a new commit with 'svn log -rHEAD' changed paths
- that match the list of EXPECTED_PATH_CHANGES."""
-
- # First, run svnmucc.
- exit_code, outlines, errlines = \
- svntest.main.run_command(svnmucc_binary, 1, 0,
- '-U', repos_url,
- '-u', 'svnmuccuser',
- '-p', 'svnmuccpass',
- '--config-dir', 'dummy',
- *varargs)
- if errlines:
- raise svntest.main.SVNCommitFailure(str(errlines))
- if len(outlines) != 1 or not _svnmucc_re.match(outlines[0]):
- raise svntest.main.SVNLineUnequal(str(outlines))
-
- # Now, run 'svn log -vq -rHEAD'
- changed_paths = []
- exit_code, outlines, errlines = \
- svntest.main.run_svn(None, 'log', '-vqrHEAD', repos_url)
- if errlines:
- raise svntest.Failure("Unable to verify commit with 'svn log': %s"
- % (str(errlines)))
- for line in outlines:
- match = _log_re.match(line)
- if match:
- changed_paths.append(match.group(1).rstrip('\n\r'))
-
- expected_path_changes.sort()
- changed_paths.sort()
- if changed_paths != expected_path_changes:
- raise svntest.Failure("Logged path changes differ from expectations\n"
- " expected: %s\n"
- " actual: %s" % (str(expected_path_changes),
- str(changed_paths)))
-
-
-def main():
- """Test svnmucc."""
-
- # revision 1
- run_svnmucc(['A /foo'
- ], # ---------
- 'mkdir', 'foo')
-
- # revision 2
- run_svnmucc(['A /z.c',
- ], # ---------
- 'put', '/dev/null', 'z.c')
-
- # revision 3
- run_svnmucc(['A /foo/z.c (from /z.c:2)',
- 'A /foo/bar (from /foo:2)',
- ], # ---------
- 'cp', '2', 'z.c', 'foo/z.c',
- 'cp', '2', 'foo', 'foo/bar')
-
- # revision 4
- run_svnmucc(['A /zig (from /foo:3)',
- 'D /zig/bar',
- 'D /foo',
- 'A /zig/zag (from /foo:3)',
- ], # ---------
- 'cp', '3', 'foo', 'zig',
- 'rm', 'zig/bar',
- 'mv', 'foo', 'zig/zag')
-
- # revision 5
- run_svnmucc(['D /z.c',
- 'A /zig/zag/bar/y.c (from /z.c:4)',
- 'A /zig/zag/bar/x.c (from /z.c:2)',
- ], # ---------
- 'mv', 'z.c', 'zig/zag/bar/y.c',
- 'cp', '2', 'z.c', 'zig/zag/bar/x.c')
-
- # revision 6
- run_svnmucc(['D /zig/zag/bar/y.c',
- 'A /zig/zag/bar/y y.c (from /zig/zag/bar/y.c:5)',
- 'A /zig/zag/bar/y%20y.c (from /zig/zag/bar/y.c:5)',
- ], # ---------
- 'mv', 'zig/zag/bar/y.c', 'zig/zag/bar/y%20y.c',
- 'cp', 'HEAD', 'zig/zag/bar/y.c', 'zig/zag/bar/y%2520y.c')
-
- # revision 7
- run_svnmucc(['D /zig/zag/bar/y y.c',
- 'A /zig/zag/bar/z z1.c (from /zig/zag/bar/y y.c:6)',
- 'A /zig/zag/bar/z%20z.c (from /zig/zag/bar/y%20y.c:6)',
- 'A /zig/zag/bar/z z2.c (from /zig/zag/bar/y y.c:6)',
- ], #---------
- 'mv', 'zig/zag/bar/y%20y.c', 'zig/zag/bar/z z1.c',
- 'cp', 'HEAD', 'zig/zag/bar/y%2520y.c', 'zig/zag/bar/z%2520z.c',
- 'cp', 'HEAD', 'zig/zag/bar/y y.c', 'zig/zag/bar/z z2.c')
-
- # revision 8
- run_svnmucc(['D /zig/zag',
- 'A /zig/foo (from /zig/zag:7)',
- 'D /zig/foo/bar/z%20z.c',
- 'D /zig/foo/bar/z z2.c',
- 'R /zig/foo/bar/z z1.c (from /zig/zag/bar/x.c:5)',
- ], #---------
- 'mv', 'zig/zag', 'zig/foo',
- 'rm', 'zig/foo/bar/z z1.c',
- 'rm', 'zig/foo/bar/z%20z2.c',
- 'rm', 'zig/foo/bar/z%2520z.c',
- 'cp', '5', 'zig/zag/bar/x.c', 'zig/foo/bar/z%20z1.c')
-
- # revision 9
- run_svnmucc(['R /zig/foo/bar (from /zig/z.c:8)',
- ], #---------
- 'rm', 'zig/foo/bar',
- 'cp', '8', 'zig/z.c', 'zig/foo/bar')
-
- # revision 10
- run_svnmucc(['R /zig/foo/bar (from /zig/foo/bar:8)',
- 'D /zig/foo/bar/z z1.c',
- ], #---------
- 'rm', 'zig/foo/bar',
- 'cp', '8', 'zig/foo/bar', 'zig/foo/bar',
- 'rm', 'zig/foo/bar/z%20z1.c')
-
- # revision 11
- run_svnmucc(['R /zig/foo (from /zig/foo/bar:10)',
- ], #---------
- 'rm', 'zig/foo',
- 'cp', 'head', 'zig/foo/bar', 'zig/foo')
-
- # revision 12
- run_svnmucc(['D /zig',
- 'A /foo (from /foo:3)',
- 'A /foo/foo (from /foo:3)',
- 'A /foo/foo/foo (from /foo:3)',
- 'D /foo/foo/bar',
- 'R /foo/foo/foo/bar (from /foo:3)',
- ], #---------
- 'rm', 'zig',
- 'cp', '3', 'foo', 'foo',
- 'cp', '3', 'foo', 'foo/foo',
- 'cp', '3', 'foo', 'foo/foo/foo',
- 'rm', 'foo/foo/bar',
- 'rm', 'foo/foo/foo/bar',
- 'cp', '3', 'foo', 'foo/foo/foo/bar')
-
- # revision 13
- run_svnmucc(['A /boozle (from /foo:3)',
- 'A /boozle/buz',
- 'A /boozle/buz/nuz',
- ], #---------
- 'cp', '3', 'foo', 'boozle',
- 'mkdir', 'boozle/buz',
- 'mkdir', 'boozle/buz/nuz')
-
- # revision 14
- run_svnmucc(['A /boozle/buz/svnmucc-test.py',
- 'A /boozle/guz (from /boozle/buz:13)',
- 'A /boozle/guz/svnmucc-test.py',
- ], #---------
- 'put', '/dev/null', 'boozle/buz/svnmucc-test.py',
- 'cp', '13', 'boozle/buz', 'boozle/guz',
- 'put', '/dev/null', 'boozle/guz/svnmucc-test.py')
-
- # revision 15
- run_svnmucc(['M /boozle/buz/svnmucc-test.py',
- 'R /boozle/guz/svnmucc-test.py',
- ], #---------
- 'put', sys.argv[0], 'boozle/buz/svnmucc-test.py',
- 'rm', 'boozle/guz/svnmucc-test.py',
- 'put', sys.argv[0], 'boozle/guz/svnmucc-test.py')
-
- # revision 16
- run_svnmucc(['R /foo/bar (from /foo/foo:15)'], #---------
- 'rm', 'foo/bar',
- 'cp', '15', 'foo/foo', 'foo/bar',
- 'propset', 'testprop', 'true', 'foo/bar')
-
- # revision 17
- run_svnmucc(['M /foo/bar'], #---------
- 'propdel', 'testprop', 'foo/bar')
-
- # revision 18
- run_svnmucc(['M /foo/z.c',
- 'M /foo/foo',
- ], #---------
- 'propset', 'testprop', 'true', 'foo/z.c',
- 'propset', 'testprop', 'true', 'foo/foo')
-
- # revision 19
- run_svnmucc(['M /foo/z.c',
- 'M /foo/foo',
- ], #---------
- 'propsetf', 'testprop', sys.argv[0], 'foo/z.c',
- 'propsetf', 'testprop', sys.argv[0], 'foo/foo')
-
- # Expected missing revision error
- xrun_svnmucc(["svnmucc: E200004: 'a' is not a revision"
- ], #---------
- 'cp', 'a', 'b')
-
- # Expected cannot be younger error
- xrun_svnmucc(['svnmucc: E205000: Copy source revision cannot be younger ' +
- 'than base revision',
- ], #---------
- 'cp', '42', 'a', 'b')
-
- # Expected already exists error
- xrun_svnmucc(["svnmucc: E125002: 'foo' already exists",
- ], #---------
- 'cp', '17', 'a', 'foo')
-
- # Expected copy_src already exists error
- xrun_svnmucc(["svnmucc: E125002: 'a/bar' (from 'foo/bar:17') already exists",
- ], #---------
- 'cp', '17', 'foo', 'a',
- 'cp', '17', 'foo/foo', 'a/bar')
-
- # Expected not found error
- xrun_svnmucc(["svnmucc: E125002: 'a' not found",
- ], #---------
- 'cp', '17', 'a', 'b')
-
-if __name__ == "__main__":
- try:
- # remove any previously existing repository, then create a new one
- if os.path.exists(repos_path):
- shutil.rmtree(repos_path)
- exit_code, outlines, errlines = \
- svntest.main.run_svnadmin('create', '--fs-type',
- 'fsfs', repos_path)
- if errlines:
- raise svntest.main.SVNRepositoryCreateFailure(repos_path)
- fp = open(os.path.join(repos_path, 'conf', 'svnserve.conf'), 'w')
- fp.write('[general]\nauth-access = write\npassword-db = passwd\n')
- fp.close()
- fp = open(os.path.join(repos_path, 'conf', 'passwd'), 'w')
- fp.write('[users]\nsvnmuccuser = svnmuccpass\n')
- fp.close()
- main()
- except SystemExit, e:
- raise
- except svntest.main.SVNCommitFailure, e:
- die("Error committing via svnmucc: %s" % (str(e)))
- except svntest.main.SVNLineUnequal, e:
- die("Unexpected svnmucc output line: %s" % (str(e)))
- except svntest.main.SVNRepositoryCreateFailure, e:
- die("Error creating test repository: %s" % (str(e)))
- except svntest.Failure, e:
- die("Test failed: %s" % (str(e)))
- except Exception, e:
- die("Something bad happened: %s" % (str(e)))
-
- # cleanup the repository on a successful run
- try:
- if os.path.exists(repos_path):
- shutil.rmtree(repos_path)
- except:
- pass
- print("SUCCESS!")
diff --git a/tools/client-side/svnmucc/svnmucc.c b/tools/client-side/svnmucc/svnmucc.c
deleted file mode 100644
index b33d6a9..0000000
--- a/tools/client-side/svnmucc/svnmucc.c
+++ /dev/null
@@ -1,1206 +0,0 @@
-/*
- * svnmucc.c: Subversion Multiple URL Client
- *
- * ====================================================================
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * ====================================================================
- *
- */
-
-/* Multiple URL Command Client
-
- Combine a list of mv, cp and rm commands on URLs into a single commit.
-
- How it works: the command line arguments are parsed into an array of
- action structures. The action structures are interpreted to build a
- tree of operation structures. The tree of operation structures is
- used to drive an RA commit editor to produce a single commit.
-
- To build this client, type 'make svnmucc' from the root of your
- Subversion source directory.
-*/
-
-#include <stdio.h>
-#include <string.h>
-
-#include <apr_lib.h>
-
-#include "svn_client.h"
-#include "svn_cmdline.h"
-#include "svn_config.h"
-#include "svn_error.h"
-#include "svn_path.h"
-#include "svn_pools.h"
-#include "svn_props.h"
-#include "svn_ra.h"
-#include "svn_string.h"
-#include "svn_subst.h"
-#include "svn_utf.h"
-#include "svn_version.h"
-#include "private/svn_cmdline_private.h"
-
-static void handle_error(svn_error_t *err, apr_pool_t *pool)
-{
- if (err)
- svn_handle_error2(err, stderr, FALSE, "svnmucc: ");
- svn_error_clear(err);
- if (pool)
- svn_pool_destroy(pool);
- exit(EXIT_FAILURE);
-}
-
-static apr_pool_t *
-init(const char *application)
-{
- apr_allocator_t *allocator;
- apr_pool_t *pool;
- svn_error_t *err;
- const svn_version_checklist_t checklist[] = {
- {"svn_client", svn_client_version},
- {"svn_subr", svn_subr_version},
- {"svn_ra", svn_ra_version},
- {NULL, NULL}
- };
-
- SVN_VERSION_DEFINE(my_version);
-
- if (svn_cmdline_init(application, stderr)
- || apr_allocator_create(&allocator))
- exit(EXIT_FAILURE);
-
- err = svn_ver_check_list(&my_version, checklist);
- if (err)
- handle_error(err, NULL);
-
- apr_allocator_max_free_set(allocator, SVN_ALLOCATOR_RECOMMENDED_MAX_FREE);
- pool = svn_pool_create_ex(NULL, allocator);
- apr_allocator_owner_set(allocator, pool);
-
- return pool;
-}
-
-static svn_error_t *
-open_tmp_file(apr_file_t **fp,
- void *callback_baton,
- apr_pool_t *pool)
-{
- /* Open a unique file; use APR_DELONCLOSE. */
- return svn_io_open_unique_file3(fp, NULL, NULL, svn_io_file_del_on_close,
- pool, pool);
-}
-
-static svn_error_t *
-create_ra_callbacks(svn_ra_callbacks2_t **callbacks,
- const char *username,
- const char *password,
- const char *config_dir,
- svn_config_t *cfg_config,
- svn_boolean_t non_interactive,
- svn_boolean_t no_auth_cache,
- apr_pool_t *pool)
-{
- SVN_ERR(svn_ra_create_callbacks(callbacks, pool));
-
- SVN_ERR(svn_cmdline_create_auth_baton(&(*callbacks)->auth_baton,
- non_interactive,
- username, password, config_dir,
- no_auth_cache,
- FALSE /* trust_server_certs */,
- cfg_config, NULL, NULL, pool));
-
- (*callbacks)->open_tmp_file = open_tmp_file;
-
- return SVN_NO_ERROR;
-}
-
-
-
-static svn_error_t *
-commit_callback(const svn_commit_info_t *commit_info,
- void *baton,
- apr_pool_t *pool)
-{
- SVN_ERR(svn_cmdline_printf(pool, "r%ld committed by %s at %s\n",
- commit_info->revision,
- (commit_info->author
- ? commit_info->author : "(no author)"),
- commit_info->date));
- return SVN_NO_ERROR;
-}
-
-typedef enum action_code_t {
- ACTION_MV,
- ACTION_MKDIR,
- ACTION_CP,
- ACTION_PROPSET,
- ACTION_PROPSETF,
- ACTION_PROPDEL,
- ACTION_PUT,
- ACTION_RM
-} action_code_t;
-
-struct operation {
- enum {
- OP_OPEN,
- OP_DELETE,
- OP_ADD,
- OP_REPLACE,
- OP_PROPSET /* only for files for which no other operation is
- occuring; directories are OP_OPEN with non-empty
- props */
- } operation;
- svn_node_kind_t kind; /* to copy, mkdir, put or set revprops */
- svn_revnum_t rev; /* to copy, valid for add and replace */
- const char *url; /* to copy, valid for add and replace */
- const char *src_file; /* for put, the source file for contents */
- apr_hash_t *children; /* const char *path -> struct operation * */
- apr_hash_t *prop_mods; /* const char *prop_name ->
- const svn_string_t *prop_value */
- apr_array_header_t *prop_dels; /* const char *prop_name deletions */
- void *baton; /* as returned by the commit editor */
-};
-
-
-/* An iterator (for use via apr_table_do) which sets node properties.
- REC is a pointer to a struct driver_state. */
-static svn_error_t *
-change_props(const svn_delta_editor_t *editor,
- void *baton,
- struct operation *child,
- apr_pool_t *pool)
-{
- apr_pool_t *iterpool = svn_pool_create(pool);
-
- if (child->prop_dels)
- {
- int i;
- for (i = 0; i < child->prop_dels->nelts; i++)
- {
- const char *prop_name;
-
- svn_pool_clear(iterpool);
- prop_name = APR_ARRAY_IDX(child->prop_dels, i, const char *);
- if (child->kind == svn_node_dir)
- SVN_ERR(editor->change_dir_prop(baton, prop_name,
- NULL, iterpool));
- else
- SVN_ERR(editor->change_file_prop(baton, prop_name,
- NULL, iterpool));
- }
- }
- if (apr_hash_count(child->prop_mods))
- {
- apr_hash_index_t *hi;
- for (hi = apr_hash_first(pool, child->prop_mods);
- hi; hi = apr_hash_next(hi))
- {
- const void *key;
- void *val;
-
- svn_pool_clear(iterpool);
- apr_hash_this(hi, &key, NULL, &val);
- if (child->kind == svn_node_dir)
- SVN_ERR(editor->change_dir_prop(baton, key, val, iterpool));
- else
- SVN_ERR(editor->change_file_prop(baton, key, val, iterpool));
- }
- }
-
- svn_pool_destroy(iterpool);
- return SVN_NO_ERROR;
-}
-
-
-/* Drive EDITOR to affect the change represented by OPERATION. HEAD
- is the last-known youngest revision in the repository. */
-static svn_error_t *
-drive(struct operation *operation,
- svn_revnum_t head,
- const svn_delta_editor_t *editor,
- apr_pool_t *pool)
-{
- apr_pool_t *subpool = svn_pool_create(pool);
- apr_hash_index_t *hi;
-
- for (hi = apr_hash_first(pool, operation->children);
- hi; hi = apr_hash_next(hi))
- {
- const void *key;
- void *val;
- struct operation *child;
- void *file_baton = NULL;
-
- svn_pool_clear(subpool);
- apr_hash_this(hi, &key, NULL, &val);
- child = val;
-
- /* Deletes and replacements are simple -- delete something. */
- if (child->operation == OP_DELETE || child->operation == OP_REPLACE)
- {
- SVN_ERR(editor->delete_entry(key, head, operation->baton, subpool));
- }
- /* Opens could be for directories or files. */
- if (child->operation == OP_OPEN || child->operation == OP_PROPSET)
- {
- if (child->kind == svn_node_dir)
- {
- SVN_ERR(editor->open_directory(key, operation->baton, head,
- subpool, &child->baton));
- }
- else
- {
- SVN_ERR(editor->open_file(key, operation->baton, head,
- subpool, &file_baton));
- }
- }
- /* Adds and replacements could also be for directories or files. */
- if (child->operation == OP_ADD || child->operation == OP_REPLACE)
- {
- if (child->kind == svn_node_dir)
- {
- SVN_ERR(editor->add_directory(key, operation->baton,
- child->url, child->rev,
- subpool, &child->baton));
- }
- else
- {
- SVN_ERR(editor->add_file(key, operation->baton, child->url,
- child->rev, subpool, &file_baton));
- }
- }
- /* If there's a source file and an open file baton, we get to
- change textual contents. */
- if ((child->src_file) && (file_baton))
- {
- svn_txdelta_window_handler_t handler;
- void *handler_baton;
- svn_stream_t *contents;
- apr_file_t *f = NULL;
-
- SVN_ERR(editor->apply_textdelta(file_baton, NULL, subpool,
- &handler, &handler_baton));
- if (strcmp(child->src_file, "-"))
- {
- SVN_ERR(svn_io_file_open(&f, child->src_file, APR_READ,
- APR_OS_DEFAULT, pool));
- }
- else
- {
- apr_status_t apr_err = apr_file_open_stdin(&f, pool);
- if (apr_err)
- return svn_error_wrap_apr(apr_err, "Can't open stdin");
- }
- contents = svn_stream_from_aprfile2(f, FALSE, pool);
- SVN_ERR(svn_txdelta_send_stream(contents, handler,
- handler_baton, NULL, pool));
- }
- /* If we opened a file, we need to apply outstanding propmods,
- then close it. */
- if (file_baton)
- {
- if (child->kind == svn_node_file)
- {
- SVN_ERR(change_props(editor, file_baton, child, subpool));
- }
- SVN_ERR(editor->close_file(file_baton, NULL, subpool));
- }
- /* If we opened, added, or replaced a directory, we need to
- recurse, apply outstanding propmods, and then close it. */
- if ((child->kind == svn_node_dir)
- && (child->operation == OP_OPEN
- || child->operation == OP_ADD
- || child->operation == OP_REPLACE))
- {
- SVN_ERR(drive(child, head, editor, subpool));
- if (child->kind == svn_node_dir)
- {
- SVN_ERR(change_props(editor, child->baton, child, subpool));
- }
- SVN_ERR(editor->close_directory(child->baton, subpool));
- }
- }
- svn_pool_destroy(subpool);
- return SVN_NO_ERROR;
-}
-
-
-/* Find the operation associated with PATH, which is a single-path
- component representing a child of the path represented by
- OPERATION. If no such child operation exists, create a new one of
- type OP_OPEN. */
-static struct operation *
-get_operation(const char *path,
- struct operation *operation,
- apr_pool_t *pool)
-{
- struct operation *child = apr_hash_get(operation->children, path,
- APR_HASH_KEY_STRING);
- if (! child)
- {
- child = apr_pcalloc(pool, sizeof(*child));
- child->children = apr_hash_make(pool);
- child->operation = OP_OPEN;
- child->rev = SVN_INVALID_REVNUM;
- child->kind = svn_node_dir;
- child->prop_mods = apr_hash_make(pool);
- child->prop_dels = apr_array_make(pool, 1, sizeof(const char *));
- apr_hash_set(operation->children, path, APR_HASH_KEY_STRING, child);
- }
- return child;
-}
-
-/* Return the portion of URL that is relative to ANCHOR (URI-decoded). */
-static const char *
-subtract_anchor(const char *anchor, const char *url, apr_pool_t *pool)
-{
- if (! strcmp(url, anchor))
- return "";
- else
- return svn_uri__is_child(anchor, url, pool);
-}
-
-/* Add PATH to the operations tree rooted at OPERATION, creating any
- intermediate nodes that are required. Here's what's expected for
- each action type:
-
- ACTION URL REV SRC-FILE PROPNAME
- ------------ ----- ------- -------- --------
- ACTION_MKDIR NULL invalid NULL NULL
- ACTION_CP valid valid NULL NULL
- ACTION_PUT NULL invalid valid NULL
- ACTION_RM NULL invalid NULL NULL
- ACTION_PROPSET valid invalid NULL valid
- ACTION_PROPDEL valid invalid NULL valid
-
- Node type information is obtained for any copy source (to determine
- whether to create a file or directory) and for any deleted path (to
- ensure it exists since svn_delta_editor_t->delete_entry doesn't
- return an error on non-existent nodes). */
-static svn_error_t *
-build(action_code_t action,
- const char *path,
- const char *url,
- svn_revnum_t rev,
- const char *prop_name,
- const svn_string_t *prop_value,
- const char *src_file,
- svn_revnum_t head,
- const char *anchor,
- svn_ra_session_t *session,
- struct operation *operation,
- apr_pool_t *pool)
-{
- apr_array_header_t *path_bits = svn_path_decompose(path, pool);
- const char *path_so_far = "";
- const char *copy_src = NULL;
- svn_revnum_t copy_rev = SVN_INVALID_REVNUM;
- int i;
-
- /* Look for any previous operations we've recognized for PATH. If
- any of PATH's ancestors have not yet been traversed, we'll be
- creating OP_OPEN operations for them as we walk down PATH's path
- components. */
- for (i = 0; i < path_bits->nelts; ++i)
- {
- const char *path_bit = APR_ARRAY_IDX(path_bits, i, const char *);
- path_so_far = svn_relpath_join(path_so_far, path_bit, pool);
- operation = get_operation(path_so_far, operation, pool);
-
- /* If we cross a replace- or add-with-history, remember the
- source of those things in case we need to lookup the node kind
- of one of their children. And if this isn't such a copy,
- but we've already seen one in of our parent paths, we just need
- to extend that copy source path by our current path
- component. */
- if (operation->url
- && SVN_IS_VALID_REVNUM(operation->rev)
- && (operation->operation == OP_REPLACE
- || operation->operation == OP_ADD))
- {
- copy_src = subtract_anchor(anchor, operation->url, pool);
- copy_rev = operation->rev;
- }
- else if (copy_src)
- {
- copy_src = svn_relpath_join(copy_src, path_bit, pool);
- }
- }
-
- /* Handle property changes. */
- if (prop_name)
- {
- if (operation->operation == OP_DELETE)
- return svn_error_createf(SVN_ERR_BAD_URL, NULL,
- "cannot set properties on a location being"
- " deleted ('%s')", path);
- /* If we're not adding this thing ourselves, check for existence. */
- if (! ((operation->operation == OP_ADD) ||
- (operation->operation == OP_REPLACE)))
- {
- SVN_ERR(svn_ra_check_path(session,
- copy_src ? copy_src : path,
- copy_src ? copy_rev : head,
- &operation->kind, pool));
- if (operation->kind == svn_node_none)
- return svn_error_createf(SVN_ERR_BAD_URL, NULL,
- "propset: '%s' not found", path);
- else if ((operation->kind == svn_node_file)
- && (operation->operation == OP_OPEN))
- operation->operation = OP_PROPSET;
- }
- if (! prop_value)
- APR_ARRAY_PUSH(operation->prop_dels, const char *) = prop_name;
- else
- apr_hash_set(operation->prop_mods, prop_name,
- APR_HASH_KEY_STRING, prop_value);
- if (!operation->rev)
- operation->rev = rev;
- return SVN_NO_ERROR;
- }
-
- /* We won't fuss about multiple operations on the same path in the
- following cases:
-
- - the prior operation was, in fact, a no-op (open)
- - the prior operation was a propset placeholder
- - the prior operation was a deletion
-
- Note: while the operation structure certainly supports the
- ability to do a copy of a file followed by a put of new contents
- for the file, we don't let that happen (yet).
- */
- if (operation->operation != OP_OPEN
- && operation->operation != OP_PROPSET
- && operation->operation != OP_DELETE)
- return svn_error_createf(SVN_ERR_BAD_URL, NULL,
- "unsupported multiple operations on '%s'", path);
-
- /* For deletions, we validate that there's actually something to
- delete. If this is a deletion of the child of a copied
- directory, we need to remember to look in the copy source tree to
- verify that this thing actually exists. */
- if (action == ACTION_RM)
- {
- operation->operation = OP_DELETE;
- SVN_ERR(svn_ra_check_path(session,
- copy_src ? copy_src : path,
- copy_src ? copy_rev : head,
- &operation->kind, pool));
- if (operation->kind == svn_node_none)
- {
- if (copy_src && strcmp(path, copy_src))
- return svn_error_createf(SVN_ERR_BAD_URL, NULL,
- "'%s' (from '%s:%ld') not found",
- path, copy_src, copy_rev);
- else
- return svn_error_createf(SVN_ERR_BAD_URL, NULL, "'%s' not found",
- path);
- }
- }
- /* Handle copy operations (which can be adds or replacements). */
- else if (action == ACTION_CP)
- {
- if (rev > head)
- return svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
- "Copy source revision cannot be younger "
- "than base revision");
- operation->operation =
- operation->operation == OP_DELETE ? OP_REPLACE : OP_ADD;
- if (operation->operation == OP_ADD)
- {
- /* There is a bug in the current version of mod_dav_svn
- which incorrectly replaces existing directories.
- Therefore we need to check if the target exists
- and raise an error here. */
- SVN_ERR(svn_ra_check_path(session,
- copy_src ? copy_src : path,
- copy_src ? copy_rev : head,
- &operation->kind, pool));
- if (operation->kind != svn_node_none)
- {
- if (copy_src && strcmp(path, copy_src))
- return svn_error_createf(SVN_ERR_BAD_URL, NULL,
- "'%s' (from '%s:%ld') already exists",
- path, copy_src, copy_rev);
- else
- return svn_error_createf(SVN_ERR_BAD_URL, NULL,
- "'%s' already exists", path);
- }
- }
- SVN_ERR(svn_ra_check_path(session, subtract_anchor(anchor, url, pool),
- rev, &operation->kind, pool));
- if (operation->kind == svn_node_none)
- return svn_error_createf(SVN_ERR_BAD_URL, NULL,
- "'%s' not found",
- subtract_anchor(anchor, url, pool));
- operation->url = url;
- operation->rev = rev;
- }
- /* Handle mkdir operations (which can be adds or replacements). */
- else if (action == ACTION_MKDIR)
- {
- operation->operation =
- operation->operation == OP_DELETE ? OP_REPLACE : OP_ADD;
- operation->kind = svn_node_dir;
- }
- /* Handle put operations (which can be adds, replacements, or opens). */
- else if (action == ACTION_PUT)
- {
- if (operation->operation == OP_DELETE)
- {
- operation->operation = OP_REPLACE;
- }
- else
- {
- SVN_ERR(svn_ra_check_path(session,
- copy_src ? copy_src : path,
- copy_src ? copy_rev : head,
- &operation->kind, pool));
- if (operation->kind == svn_node_file)
- operation->operation = OP_OPEN;
- else if (operation->kind == svn_node_none)
- operation->operation = OP_ADD;
- else
- return svn_error_createf(SVN_ERR_BAD_URL, NULL,
- "'%s' is not a file", path);
- }
- operation->kind = svn_node_file;
- operation->src_file = src_file;
- }
- else
- {
- /* We shouldn't get here. */
- SVN_ERR_MALFUNCTION();
- }
-
- return SVN_NO_ERROR;
-}
-
-struct action {
- action_code_t action;
-
- /* revision (copy-from-rev of path[0] for cp; base-rev for put) */
- svn_revnum_t rev;
-
- /* action path[0] path[1]
- * ------ ------- -------
- * mv source target
- * mkdir target (null)
- * cp source target
- * put target source
- * rm target (null)
- * propset target (null)
- */
- const char *path[2];
-
- /* property name/value */
- const char *prop_name;
- const svn_string_t *prop_value;
-};
-
-static svn_error_t *
-execute(const apr_array_header_t *actions,
- const char *anchor,
- apr_hash_t *revprops,
- const char *username,
- const char *password,
- const char *config_dir,
- const apr_array_header_t *config_options,
- svn_boolean_t non_interactive,
- svn_boolean_t no_auth_cache,
- svn_revnum_t base_revision,
- apr_pool_t *pool)
-{
- svn_ra_session_t *session;
- svn_revnum_t head;
- const svn_delta_editor_t *editor;
- svn_ra_callbacks2_t *ra_callbacks;
- void *editor_baton;
- struct operation root;
- svn_error_t *err;
- apr_hash_t *config;
- svn_config_t *cfg_config;
- int i;
-
- SVN_ERR(svn_config_get_config(&config, config_dir, pool));
- SVN_ERR(svn_cmdline__apply_config_options(config, config_options,
- "svnmucc: ", "--config-option"));
- cfg_config = apr_hash_get(config, SVN_CONFIG_CATEGORY_CONFIG,
- APR_HASH_KEY_STRING);
- SVN_ERR(create_ra_callbacks(&ra_callbacks, username, password, config_dir,
- cfg_config, non_interactive, no_auth_cache,
- pool));
- SVN_ERR(svn_ra_open4(&session, NULL, anchor, NULL, ra_callbacks,
- NULL, config, pool));
-
- SVN_ERR(svn_ra_get_latest_revnum(session, &head, pool));
- if (SVN_IS_VALID_REVNUM(base_revision))
- {
- if (base_revision > head)
- return svn_error_createf(SVN_ERR_FS_NO_SUCH_REVISION, NULL,
- "No such revision %ld (youngest is %ld)",
- base_revision, head);
- head = base_revision;
- }
-
- root.children = apr_hash_make(pool);
- root.operation = OP_OPEN;
- for (i = 0; i < actions->nelts; ++i)
- {
- struct action *action = APR_ARRAY_IDX(actions, i, struct action *);
- switch (action->action)
- {
- const char *path1, *path2;
- case ACTION_MV:
- path1 = subtract_anchor(anchor, action->path[0], pool);
- path2 = subtract_anchor(anchor, action->path[1], pool);
- SVN_ERR(build(ACTION_RM, path1, NULL,
- SVN_INVALID_REVNUM, NULL, NULL, NULL, head, anchor,
- session, &root, pool));
- SVN_ERR(build(ACTION_CP, path2, action->path[0],
- head, NULL, NULL, NULL, head, anchor,
- session, &root, pool));
- break;
- case ACTION_CP:
- path2 = subtract_anchor(anchor, action->path[1], pool);
- if (action->rev == SVN_INVALID_REVNUM)
- action->rev = head;
- SVN_ERR(build(ACTION_CP, path2, action->path[0],
- action->rev, NULL, NULL, NULL, head, anchor,
- session, &root, pool));
- break;
- case ACTION_RM:
- path1 = subtract_anchor(anchor, action->path[0], pool);
- SVN_ERR(build(ACTION_RM, path1, NULL,
- SVN_INVALID_REVNUM, NULL, NULL, NULL, head, anchor,
- session, &root, pool));
- break;
- case ACTION_MKDIR:
- path1 = subtract_anchor(anchor, action->path[0], pool);
- SVN_ERR(build(ACTION_MKDIR, path1, action->path[0],
- SVN_INVALID_REVNUM, NULL, NULL, NULL, head, anchor,
- session, &root, pool));
- break;
- case ACTION_PUT:
- path1 = subtract_anchor(anchor, action->path[0], pool);
- SVN_ERR(build(ACTION_PUT, path1, action->path[0],
- SVN_INVALID_REVNUM, NULL, NULL, action->path[1],
- head, anchor, session, &root, pool));
- break;
- case ACTION_PROPSET:
- case ACTION_PROPDEL:
- path1 = subtract_anchor(anchor, action->path[0], pool);
- SVN_ERR(build(action->action, path1, action->path[0],
- SVN_INVALID_REVNUM,
- action->prop_name, action->prop_value,
- NULL, head, anchor, session, &root, pool));
- break;
- case ACTION_PROPSETF:
- default:
- SVN_ERR_MALFUNCTION_NO_RETURN();
- }
- }
-
- SVN_ERR(svn_ra_get_commit_editor3(session, &editor, &editor_baton, revprops,
- commit_callback, NULL, NULL, FALSE, pool));
-
- SVN_ERR(editor->open_root(editor_baton, head, pool, &root.baton));
- err = drive(&root, head, editor, pool);
- if (!err)
- err = editor->close_edit(editor_baton, pool);
- if (err)
- svn_error_clear(editor->abort_edit(editor_baton, pool));
-
- return err;
-}
-
-static svn_error_t *
-read_propvalue_file(const svn_string_t **value_p,
- const char *filename,
- apr_pool_t *pool)
-{
- svn_stringbuf_t *value;
- apr_pool_t *scratch_pool = svn_pool_create(pool);
- apr_file_t *f;
-
- SVN_ERR(svn_io_file_open(&f, filename, APR_READ | APR_BINARY | APR_BUFFERED,
- APR_OS_DEFAULT, scratch_pool));
- SVN_ERR(svn_stringbuf_from_aprfile(&value, f, scratch_pool));
- *value_p = svn_string_create_from_buf(value, pool);
- svn_pool_destroy(scratch_pool);
- return SVN_NO_ERROR;
-}
-
-/* Perform the typical suite of manipulations for user-provided URLs
- on URL, returning the result (allocated from POOL): IRI-to-URI
- conversion, auto-escaping, and canonicalization. */
-static const char *
-sanitize_url(const char *url,
- apr_pool_t *pool)
-{
- url = svn_path_uri_from_iri(url, pool);
- url = svn_path_uri_autoescape(url, pool);
- return svn_uri_canonicalize(url, pool);
-}
-
-static void
-usage(apr_pool_t *pool, int exit_val)
-{
- FILE *stream = exit_val == EXIT_SUCCESS ? stdout : stderr;
- const char msg[] =
- "Multiple URL Command Client (for Subversion)\n"
- "\nUsage: svnmucc [OPTION]... [ACTION]...\n"
- "\nActions:\n"
- " cp REV URL1 URL2 copy URL1@REV to URL2\n"
- " mkdir URL create new directory URL\n"
- " mv URL1 URL2 move URL1 to URL2\n"
- " rm URL delete URL\n"
- " put SRC-FILE URL add or modify file URL with contents copied from\n"
- " SRC-FILE (use \"-\" to read from standard input)\n"
- " propset NAME VAL URL set property NAME on URL to value VAL\n"
- " propsetf NAME VAL URL set property NAME on URL to value from file VAL\n"
- " propdel NAME URL delete property NAME from URL\n"
- "\nOptions:\n"
- " -h, --help display this text\n"
- " -m, --message ARG use ARG as a log message\n"
- " -F, --file ARG read log message from file ARG\n"
- " -u, --username ARG commit the changes as username ARG\n"
- " -p, --password ARG use ARG as the password\n"
- " -U, --root-url ARG interpret all action URLs are relative to ARG\n"
- " -r, --revision ARG use revision ARG as baseline for changes\n"
- " --with-revprop A[=B] set revision property A in new revision to B\n"
- " if specified, else to the empty string\n"
- " -n, --non-interactive don't prompt the user about anything\n"
- " -X, --extra-args ARG append arguments from file ARG (one per line;\n"
- " use \"-\" to read from standard input)\n"
- " --config-dir ARG use ARG to override the config directory\n"
- " --config-option ARG use ARG so override a configuration option\n"
- " --no-auth-cache do not cache authentication tokens\n"
- " --version print version information\n";
- svn_error_clear(svn_cmdline_fputs(msg, stream, pool));
- apr_pool_destroy(pool);
- exit(exit_val);
-}
-
-static void
-insufficient(apr_pool_t *pool)
-{
- handle_error(svn_error_create(SVN_ERR_INCORRECT_PARAMS, NULL,
- "insufficient arguments"),
- pool);
-}
-
-static svn_error_t *
-display_version(apr_getopt_t *os, apr_pool_t *pool)
-{
- const char *ra_desc_start
- = "The following repository access (RA) modules are available:\n\n";
- svn_stringbuf_t *version_footer;
-
- version_footer = svn_stringbuf_create(ra_desc_start, pool);
- SVN_ERR(svn_ra_print_modules(version_footer, pool));
-
- SVN_ERR(svn_opt_print_help3(os, "svnmucc", TRUE, FALSE, version_footer->data,
- NULL, NULL, NULL, NULL, NULL, pool));
-
- return SVN_NO_ERROR;
-}
-
-int
-main(int argc, const char **argv)
-{
- apr_pool_t *pool = init("svnmucc");
- apr_array_header_t *actions = apr_array_make(pool, 1,
- sizeof(struct action *));
- const char *anchor = NULL;
- svn_error_t *err = SVN_NO_ERROR;
- apr_getopt_t *getopt;
- enum {
- config_dir_opt = SVN_OPT_FIRST_LONGOPT_ID,
- config_inline_opt,
- no_auth_cache_opt,
- version_opt,
- with_revprop_opt
- };
- const apr_getopt_option_t options[] = {
- {"message", 'm', 1, ""},
- {"file", 'F', 1, ""},
- {"username", 'u', 1, ""},
- {"password", 'p', 1, ""},
- {"root-url", 'U', 1, ""},
- {"revision", 'r', 1, ""},
- {"with-revprop", with_revprop_opt, 1, ""},
- {"extra-args", 'X', 1, ""},
- {"help", 'h', 0, ""},
- {"non-interactive", 'n', 0, ""},
- {"config-dir", config_dir_opt, 1, ""},
- {"config-option", config_inline_opt, 1, ""},
- {"no-auth-cache", no_auth_cache_opt, 0, ""},
- {"version", version_opt, 0, ""},
- {NULL, 0, 0, NULL}
- };
- const char *message = NULL;
- const char *username = NULL, *password = NULL;
- const char *root_url = NULL, *extra_args_file = NULL;
- const char *config_dir = NULL;
- apr_array_header_t *config_options;
- svn_boolean_t non_interactive = FALSE;
- svn_boolean_t no_auth_cache = FALSE;
- svn_revnum_t base_revision = SVN_INVALID_REVNUM;
- apr_array_header_t *action_args;
- apr_hash_t *revprops = apr_hash_make(pool);
- int i;
-
- config_options = apr_array_make(pool, 0,
- sizeof(svn_cmdline__config_argument_t*));
-
- apr_getopt_init(&getopt, pool, argc, argv);
- getopt->interleave = 1;
- while (1)
- {
- int opt;
- const char *arg;
- const char *opt_arg;
-
- apr_status_t status = apr_getopt_long(getopt, options, &opt, &arg);
- if (APR_STATUS_IS_EOF(status))
- break;
- if (status != APR_SUCCESS)
- handle_error(svn_error_wrap_apr(status, "getopt failure"), pool);
- switch(opt)
- {
- case 'm':
- err = svn_utf_cstring_to_utf8(&message, arg, pool);
- if (err)
- handle_error(err, pool);
- break;
- case 'F':
- {
- const char *arg_utf8;
- svn_stringbuf_t *contents;
- err = svn_utf_cstring_to_utf8(&arg_utf8, arg, pool);
- if (! err)
- err = svn_stringbuf_from_file2(&contents, arg, pool);
- if (! err)
- err = svn_utf_cstring_to_utf8(&message, contents->data, pool);
- if (err)
- handle_error(err, pool);
- }
- break;
- case 'u':
- username = apr_pstrdup(pool, arg);
- break;
- case 'p':
- password = apr_pstrdup(pool, arg);
- break;
- case 'U':
- err = svn_utf_cstring_to_utf8(&root_url, arg, pool);
- if (err)
- handle_error(err, pool);
- if (! svn_path_is_url(root_url))
- handle_error(svn_error_createf(SVN_ERR_INCORRECT_PARAMS, NULL,
- "'%s' is not a URL\n", root_url),
- pool);
- root_url = sanitize_url(root_url, pool);
- break;
- case 'r':
- {
- char *digits_end = NULL;
- base_revision = strtol(arg, &digits_end, 10);
- if ((! SVN_IS_VALID_REVNUM(base_revision))
- || (! digits_end)
- || *digits_end)
- handle_error(svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR,
- NULL, "Invalid revision number"),
- pool);
- }
- break;
- case with_revprop_opt:
- err = svn_opt_parse_revprop(&revprops, arg, pool);
- if (err != SVN_NO_ERROR)
- handle_error(err, pool);
- break;
- case 'X':
- extra_args_file = apr_pstrdup(pool, arg);
- break;
- case 'n':
- non_interactive = TRUE;
- break;
- case config_dir_opt:
- err = svn_utf_cstring_to_utf8(&config_dir, arg, pool);
- if (err)
- handle_error(err, pool);
- break;
- case config_inline_opt:
- err = svn_utf_cstring_to_utf8(&opt_arg, arg, pool);
- if (err)
- handle_error(err, pool);
-
- err = svn_cmdline__parse_config_option(config_options, opt_arg,
- pool);
- if (err)
- handle_error(err, pool);
- break;
- case no_auth_cache_opt:
- no_auth_cache = TRUE;
- break;
- case version_opt:
- SVN_INT_ERR(display_version(getopt, pool));
- exit(EXIT_SUCCESS);
- break;
- case 'h':
- usage(pool, EXIT_SUCCESS);
- break;
- }
- }
-
- /* Copy the rest of our command-line arguments to an array,
- UTF-8-ing them along the way. */
- action_args = apr_array_make(pool, getopt->argc, sizeof(const char *));
- while (getopt->ind < getopt->argc)
- {
- const char *arg = getopt->argv[getopt->ind++];
- if ((err = svn_utf_cstring_to_utf8(&(APR_ARRAY_PUSH(action_args,
- const char *)),
- arg, pool)))
- handle_error(err, pool);
- }
-
- /* If there are extra arguments in a supplementary file, tack those
- on, too (again, in UTF8 form). */
- if (extra_args_file)
- {
- const char *extra_args_file_utf8;
- svn_stringbuf_t *contents, *contents_utf8;
-
- err = svn_utf_cstring_to_utf8(&extra_args_file_utf8,
- extra_args_file, pool);
- if (! err)
- err = svn_stringbuf_from_file2(&contents, extra_args_file_utf8, pool);
- if (! err)
- err = svn_utf_stringbuf_to_utf8(&contents_utf8, contents, pool);
- if (err)
- handle_error(err, pool);
- svn_cstring_split_append(action_args, contents_utf8->data, "\n\r",
- FALSE, pool);
- }
-
- /* Now, we iterate over the combined set of arguments -- our actions. */
- for (i = 0; i < action_args->nelts; )
- {
- int j, num_url_args;
- const char *action_string = APR_ARRAY_IDX(action_args, i, const char *);
- struct action *action = apr_palloc(pool, sizeof(*action));
-
- /* First, parse the action. */
- if (! strcmp(action_string, "mv"))
- action->action = ACTION_MV;
- else if (! strcmp(action_string, "cp"))
- action->action = ACTION_CP;
- else if (! strcmp(action_string, "mkdir"))
- action->action = ACTION_MKDIR;
- else if (! strcmp(action_string, "rm"))
- action->action = ACTION_RM;
- else if (! strcmp(action_string, "put"))
- action->action = ACTION_PUT;
- else if (! strcmp(action_string, "propset"))
- action->action = ACTION_PROPSET;
- else if (! strcmp(action_string, "propsetf"))
- action->action = ACTION_PROPSETF;
- else if (! strcmp(action_string, "propdel"))
- action->action = ACTION_PROPDEL;
- else if (! strcmp(action_string, "?") || ! strcmp(action_string, "h")
- || ! strcmp(action_string, "help"))
- usage(pool, EXIT_SUCCESS);
- else
- handle_error(svn_error_createf(SVN_ERR_INCORRECT_PARAMS, NULL,
- "'%s' is not an action\n",
- action_string), pool);
- if (++i == action_args->nelts)
- insufficient(pool);
-
- /* For copies, there should be a revision number next. */
- if (action->action == ACTION_CP)
- {
- const char *rev_str = APR_ARRAY_IDX(action_args, i, const char *);
- if (strcmp(rev_str, "head") == 0)
- action->rev = SVN_INVALID_REVNUM;
- else if (strcmp(rev_str, "HEAD") == 0)
- action->rev = SVN_INVALID_REVNUM;
- else
- {
- char *end;
-
- while (*rev_str == 'r')
- ++rev_str;
-
- action->rev = strtol(rev_str, &end, 0);
- if (*end)
- handle_error(svn_error_createf(SVN_ERR_INCORRECT_PARAMS, NULL,
- "'%s' is not a revision\n",
- rev_str), pool);
- }
- if (++i == action_args->nelts)
- insufficient(pool);
- }
- else
- {
- action->rev = SVN_INVALID_REVNUM;
- }
-
- /* For puts, there should be a local file next. */
- if (action->action == ACTION_PUT)
- {
- action->path[1] =
- svn_dirent_canonicalize(APR_ARRAY_IDX(action_args, i,
- const char *), pool);
- if (++i == action_args->nelts)
- insufficient(pool);
- }
-
- /* For propset, propsetf, and propdel, a property name (and
- maybe a property value or file which contains one) comes next. */
- if ((action->action == ACTION_PROPSET)
- || (action->action == ACTION_PROPSETF)
- || (action->action == ACTION_PROPDEL))
- {
- action->prop_name = APR_ARRAY_IDX(action_args, i, const char *);
- if (++i == action_args->nelts)
- insufficient(pool);
-
- if (action->action == ACTION_PROPDEL)
- {
- action->prop_value = NULL;
- }
- else if (action->action == ACTION_PROPSET)
- {
- action->prop_value =
- svn_string_create(APR_ARRAY_IDX(action_args, i,
- const char *), pool);
- if (++i == action_args->nelts)
- insufficient(pool);
- }
- else
- {
- const char *propval_file =
- svn_dirent_canonicalize(APR_ARRAY_IDX(action_args, i,
- const char *), pool);
-
- if (++i == action_args->nelts)
- insufficient(pool);
-
- err = read_propvalue_file(&(action->prop_value),
- propval_file, pool);
- if (err)
- handle_error(err, pool);
-
- action->action = ACTION_PROPSET;
- }
-
- if (action->prop_value
- && svn_prop_needs_translation(action->prop_name))
- {
- svn_string_t *translated_value;
- err = svn_subst_translate_string2(&translated_value, NULL,
- NULL, action->prop_value, NULL,
- FALSE, pool, pool);
- if (err)
- handle_error(
- svn_error_quick_wrap(err,
- "Error normalizing property value"),
- pool);
- action->prop_value = translated_value;
- }
- }
-
- /* How many URLs does this action expect? */
- if (action->action == ACTION_RM
- || action->action == ACTION_MKDIR
- || action->action == ACTION_PUT
- || action->action == ACTION_PROPSET
- || action->action == ACTION_PROPSETF /* shouldn't see this one */
- || action->action == ACTION_PROPDEL)
- num_url_args = 1;
- else
- num_url_args = 2;
-
- /* Parse the required number of URLs. */
- for (j = 0; j < num_url_args; ++j)
- {
- const char *url = APR_ARRAY_IDX(action_args, i, const char *);
-
- /* If there's a ROOT_URL, we expect URL to be a path
- relative to ROOT_URL (and we build a full url from the
- combination of the two). Otherwise, it should be a full
- url. */
- if (! svn_path_is_url(url))
- {
- if (! root_url)
- handle_error(svn_error_createf(SVN_ERR_INCORRECT_PARAMS, NULL,
- "'%s' is not a URL, and "
- "--root-url (-U) not provided\n",
- url), pool);
- /* ### These relpaths are already URI-encoded. */
- url = apr_pstrcat(pool, root_url, "/",
- svn_relpath_canonicalize(url, pool),
- (char *)NULL);
- }
- url = sanitize_url(url, pool);
- action->path[j] = url;
-
- /* The cp source could be the anchor, but the other URLs should be
- children of the anchor. */
- if (! (action->action == ACTION_CP && j == 0))
- url = svn_uri_dirname(url, pool);
- if (! anchor)
- anchor = url;
- else
- anchor = svn_uri_get_longest_ancestor(anchor, url, pool);
-
- if ((++i == action_args->nelts) && (j >= num_url_args))
- insufficient(pool);
- }
- APR_ARRAY_PUSH(actions, struct action *) = action;
- }
-
- if (! actions->nelts)
- usage(pool, EXIT_FAILURE);
-
- if (message == NULL)
- {
- if (apr_hash_get(revprops, SVN_PROP_REVISION_LOG,
- APR_HASH_KEY_STRING) == NULL)
- /* None of -F, -m, or --with-revprop=svn:log specified; default. */
- apr_hash_set(revprops, SVN_PROP_REVISION_LOG, APR_HASH_KEY_STRING,
- svn_string_create("committed using svnmucc", pool));
- }
- else
- {
- /* -F or -m specified; use that even if --with-revprop=svn:log. */
- apr_hash_set(revprops, SVN_PROP_REVISION_LOG, APR_HASH_KEY_STRING,
- svn_string_create(message, pool));
- }
-
- if ((err = execute(actions, anchor, revprops, username, password,
- config_dir, config_options, non_interactive,
- no_auth_cache, base_revision, pool)))
- handle_error(err, pool);
-
- svn_pool_destroy(pool);
- return EXIT_SUCCESS;
-}
diff --git a/tools/dev/aprerr.txt b/tools/dev/aprerr.txt
new file mode 100644
index 0000000..7b532db
--- /dev/null
+++ b/tools/dev/aprerr.txt
@@ -0,0 +1,138 @@
+APR_SUCCESS = 0
+SOCBASEERR = 10000
+SOCEPERM = 10001
+SOCESRCH = 10003
+SOCEINTR = 10004
+SOCENXIO = 10006
+SOCEBADF = 10009
+SOCEACCES = 10013
+SOCEFAULT = 10014
+SOCEINVAL = 10022
+SOCEMFILE = 10024
+SOCEPIPE = 10032
+SOCEWOULDBLOCK = 10035
+SOCEINPROGRESS = 10036
+SOCEALREADY = 10037
+SOCENOTSOCK = 10038
+SOCEDESTADDRREQ = 10039
+SOCEMSGSIZE = 10040
+SOCEPROTOTYPE = 10041
+SOCENOPROTOOPT = 10042
+SOCEPROTONOSUPPORT = 10043
+SOCESOCKTNOSUPPORT = 10044
+SOCEOPNOTSUPP = 10045
+SOCEPFNOSUPPORT = 10046
+SOCEAFNOSUPPORT = 10047
+SOCEADDRINUSE = 10048
+SOCEADDRNOTAVAIL = 10049
+SOCENETDOWN = 10050
+SOCENETUNREACH = 10051
+SOCENETRESET = 10052
+SOCECONNABORTED = 10053
+SOCECONNRESET = 10054
+SOCENOBUFS = 10055
+SOCEISCONN = 10056
+SOCENOTCONN = 10057
+SOCESHUTDOWN = 10058
+SOCETOOMANYREFS = 10059
+SOCETIMEDOUT = 10060
+SOCECONNREFUSED = 10061
+SOCELOOP = 10062
+SOCENAMETOOLONG = 10063
+SOCEHOSTDOWN = 10064
+SOCEHOSTUNREACH = 10065
+SOCENOTEMPTY = 10066
+APR_UTIL_ERRSPACE_SIZE = 20000
+APR_OS_START_ERROR = 20000
+APR_ENOSTAT = 20001
+APR_ENOPOOL = 20002
+APR_EBADDATE = 20004
+APR_EINVALSOCK = 20005
+APR_ENOPROC = 20006
+APR_ENOTIME = 20007
+APR_ENODIR = 20008
+APR_ENOLOCK = 20009
+APR_ENOPOLL = 20010
+APR_ENOSOCKET = 20011
+APR_ENOTHREAD = 20012
+APR_ENOTHDKEY = 20013
+APR_EGENERAL = 20014
+APR_ENOSHMAVAIL = 20015
+APR_EBADIP = 20016
+APR_EBADMASK = 20017
+APR_EDSOOPEN = 20019
+APR_EABSOLUTE = 20020
+APR_ERELATIVE = 20021
+APR_EINCOMPLETE = 20022
+APR_EABOVEROOT = 20023
+APR_EBADPATH = 20024
+APR_EPATHWILD = 20025
+APR_ESYMNOTFOUND = 20026
+APR_EPROC_UNKNOWN = 20027
+APR_ENOTENOUGHENTROPY = 20028
+APR_OS_ERRSPACE_SIZE = 50000
+APR_OS_START_STATUS = 70000
+APR_INCHILD = 70001
+APR_INPARENT = 70002
+APR_DETACH = 70003
+APR_NOTDETACH = 70004
+APR_CHILD_DONE = 70005
+APR_CHILD_NOTDONE = 70006
+APR_TIMEUP = 70007
+APR_INCOMPLETE = 70008
+APR_BADCH = 70012
+APR_BADARG = 70013
+APR_EOF = 70014
+APR_NOTFOUND = 70015
+APR_ANONYMOUS = 70019
+APR_FILEBASED = 70020
+APR_KEYBASED = 70021
+APR_EINIT = 70022
+APR_ENOTIMPL = 70023
+APR_EMISMATCH = 70024
+APR_EBUSY = 70025
+APR_UTIL_START_STATUS = 100000
+APR_ENOKEY = 100001
+APR_ENOIV = 100002
+APR_EKEYTYPE = 100003
+APR_ENOSPACE = 100004
+APR_ECRYPT = 100005
+APR_EPADDING = 100006
+APR_EKEYLENGTH = 100007
+APR_ENOCIPHER = 100008
+APR_ENODIGEST = 100009
+APR_ENOENGINE = 100010
+APR_EINITENGINE = 100011
+APR_EREINIT = 100012
+APR_OS_START_USEERR = 120000
+APR_OS_START_USERERR = 120000
+APR_OS_START_CANONERR = 620000
+APR_EACCES = 620001
+APR_EEXIST = 620002
+APR_ENAMETOOLONG = 620003
+APR_ENOENT = 620004
+APR_ENOTDIR = 620005
+APR_ENOSPC = 620006
+APR_ENOMEM = 620007
+APR_EMFILE = 620008
+APR_ENFILE = 620009
+APR_EBADF = 620010
+APR_EINVAL = 620011
+APR_ESPIPE = 620012
+APR_EAGAIN = 620013
+APR_EINTR = 620014
+APR_ENOTSOCK = 620015
+APR_ECONNREFUSED = 620016
+APR_EINPROGRESS = 620017
+APR_ECONNABORTED = 620018
+APR_ECONNRESET = 620019
+APR_ETIMEDOUT = 620020
+APR_EHOSTUNREACH = 620021
+APR_ENETUNREACH = 620022
+APR_EFTYPE = 620023
+APR_EPIPE = 620024
+APR_EXDEV = 620025
+APR_ENOTEMPTY = 620026
+APR_EAFNOSUPPORT = 620027
+APR_OS_START_EAIERR = 670000
+APR_OS_START_SYSERR = 720000
diff --git a/tools/dev/benchmarks/large_dirs/create_bigdir.sh b/tools/dev/benchmarks/large_dirs/create_bigdir.sh
index 9193ee5..a389dcc 100755
--- a/tools/dev/benchmarks/large_dirs/create_bigdir.sh
+++ b/tools/dev/benchmarks/large_dirs/create_bigdir.sh
@@ -124,6 +124,20 @@ run_svn_del() {
fi
}
+run_svn_del_many() {
+ printf "\n" > files.lst
+ sequence=`get_sequence 2 ${1}`
+ for i in $sequence; do
+ printf "$WC/${1}_c/$i\n" >> files.lst
+ done
+
+ if [ "${VALGRIND}" = "" ] ; then
+ time ${SVN} del -q --targets files.lst > /dev/null
+ else
+ ${VALGRIND} ${VG_OUTFILE}="${VG_TOOL}.out.del_many.$1" ${SVN} del -q --targets files.lst > /dev/null
+ fi
+}
+
run_svn_ci() {
if [ "${VALGRIND}" = "" ] ; then
time ${SVN} ci $WC/$1 -m "" -q > /dev/null
@@ -185,12 +199,13 @@ while [ $FILECOUNT -lt $MAXCOUNT ]; do
run_svn_del ${FILECOUNT} 1
printf "\tDeleting files ... \t"
- time sh -c "
- for i in $sequence; do
- ${SVN} del $WC/${FILECOUNT}_c/\$i -q
- done "
+ if [ "$FILECOUNT" == "1" ] ; then
+ printf " skipped (0 files to delete)\n"
+ else
+ run_svn_del_many ${FILECOUNT}
+ fi
- printf "\tCommit deletions ...\t"
+ printf "\tCommit deletions ..\t"
run_svn_ci ${FILECOUNT}_c del
rm -rf $WC
diff --git a/tools/dev/benchmarks/suite1/benchmark.py b/tools/dev/benchmarks/suite1/benchmark.py
index 7eb3dd9..fc61848 100755
--- a/tools/dev/benchmarks/suite1/benchmark.py
+++ b/tools/dev/benchmarks/suite1/benchmark.py
@@ -17,42 +17,171 @@
# specific language governing permissions and limitations
# under the License.
-"""
-usage: benchmark.py run <run_file> <levels> <spread> [N]
- benchmark.py show <run_file>
- benchmark.py compare <run_file1> <run_file2>
- benchmark.py combine <new_file> <run_file1> <run_file2> ...
-
-Test data is written to run_file.
-If a run_file exists, data is added to it.
-<levels> is the number of directory levels to create
-<spread> is the number of child trees spreading off each dir level
+"""Usage: benchmark.py run|list|compare|show|chart <selection> ...
+
+SELECTING TIMINGS -- B@R,LxS
+
+In the subcommands below, a timings selection consists of a string with up to
+four elements:
+ <branch>@<revision>,<levels>x<spread>
+abbreviated as:
+ B@R,LxS
+
+<branch> is a label of an svn branch, e.g. "1.7.x".
+<revision> is the last-changed-revision of above branch.
+<levels> is the number of directory levels created in the benchmark.
+<spread> is the number of child trees spreading off each dir level.
+
+<branch_name> and <revision> are simply used for labeling. Upon the actual
+test runs, you should enter labels matching the selected --svn-bin-dir.
+Later, you can select runs individually by using these labels.
+
+For <revision>, you can provide special keywords:
+- 'each' has the same effect as entering each available revision number that
+ is on record in the db in a separate timings selection.
+- 'last' is the same as 'each', but shows only the last 10 revisions. 'last'
+ can be combined with a number, e.g. 'last12'.
+
+For all subcommands except 'run', you can omit some or all of the elements of
+a timings selection to combine all available timings sets. Try that out with
+the 'list' subcommand.
+
+Examples:
+ benchmark.py run 1.7.x@12345,5x5
+ benchmark.py show trunk@12345
+ benchmark.py compare 1.7.0,1x100 trunk@each,1x100
+ benchmark.py chart compare 1.7.0,5x5 trunk@last12,5x5
+
+
+RUN BENCHMARKS
+
+ benchmark.py run B@R,LxS [N] [options]
+
+Test data is added to an sqlite database created automatically, by default
+'benchmark.db' in the current working directory. To specify a different path,
+use option -f <path_to_db>.
+
If <N> is provided, the run is repeated N times.
-"""
+
+<levels> and <spread> control the way the tested working copy is structured:
+ <levels>: number of directory levels to create.
+ <spread>: number of files and subdirectories created in each dir.
+
+
+LIST WHAT IS ON RECORD
+
+ benchmark.py list [B@R,LxS]
+
+Find entries in the database for the given constraints. Any arguments can
+be omitted. (To select only a rev, start with a '@', like '@123'; to select
+only spread, start with an 'x', like "x100".)
+
+Call without arguments to get a listing of all available constraints.
+
+
+COMPARE TIMINGS
+
+ benchmark.py compare B@R,LxS B@R,LxS [B@R,LxS [...]]
+
+Compare any number of timings sets to the first provided set (in text mode).
+For example:
+ benchmark.py compare 1.7.0 trunk@1349903
+ Compare the total timings of all combined '1.7.0' branch runs to
+ all combined runs of 'trunk'-at-revision-1349903.
+ benchmark.py compare 1.7.0,5x5 trunk@1349903,5x5
+ Same as above, but only compare the working copy types with 5 levels
+ and a spread of 5.
+
+Use the -c option to limit comparison to specific command names.
+
+
+SHOW TIMINGS
+
+ benchmark.py show B@R,LxS [B@R,LxS [...]]
+
+Print out a summary of the timings selected from the given constraints.
+
+
+GENERATE CHARTS
+
+ benchmark.py chart compare B@R,LxS B@R,LxS [ B@R,LxS ... ]
+
+Produce a bar chart that compares any number of sets of timings. Like with
+the plain 'compare' command, the first set is taken as a reference point for
+100% and +-0 seconds. Each following dataset produces a set of labeled bar
+charts, grouped by svn command names. At least two timings sets must be
+provided.
+
+Use the -c option to limit comparison to specific command names.
+
+
+EXAMPLES
+
+# Run 3 benchmarks on svn 1.7.0 with 5 dir levels and 5 files and subdirs for
+# each level (spread). Timings are saved in ./benchmark.db.
+# Provide label '1.7.0' and its Last-Changed-Rev for later reference.
+./benchmark.py run --svn-bin-dir ~/svn-prefix/1.7.0/bin 1.7.0@1181106,5x5 3
+
+# Record 3 benchmark runs on trunk, again naming its Last-Changed-Rev.
+# (You may also set your $PATH instead of using --svn-bin-dir.)
+./benchmark.py run --svn-bin-dir ~/svn-prefix/trunk/bin trunk@1352725,5x5 3
+
+# Work with the results of above two runs
+./benchmark.py list
+./benchmark.py compare 1.7.0 trunk
+./benchmark.py show 1.7.0 trunk
+./benchmark.py chart compare 1.7.0 trunk
+./benchmark.py chart compare 1.7.0 trunk -c "update,commit,TOTAL RUN"
+
+# Rebuild r1352598, run it and chart improvements since 1.7.0.
+svn up -r1352598 ~/src/trunk
+make -C ~/src/trunk dist-clean install
+export PATH="$HOME/svn-prefix/trunk/bin:$PATH"
+which svn
+./benchmark.py run trunk@1352598,5x5 3
+./benchmark.py chart compare 1.7.0 trunk@1352598 trunk@1352725 -o chart.svg
+
+
+GLOBAL OPTIONS"""
import os
-import sys
+import time
+import datetime
+import sqlite3
+import optparse
import tempfile
import subprocess
-import datetime
import random
import shutil
-import cPickle
-import optparse
import stat
+import string
+from copy import copy
+IGNORE_COMMANDS = ('--version', )
TOTAL_RUN = 'TOTAL RUN'
-timings = None
+j = os.path.join
+
+def bail(msg=None):
+ if msg:
+ print msg
+ exit(1)
-def run_cmd(cmd, stdin=None, shell=False):
+def time_str():
+ return time.strftime('%Y-%m-%d %H:%M:%S');
- if shell:
- printable_cmd = 'CMD: ' + cmd
- else:
- printable_cmd = 'CMD: ' + ' '.join(cmd)
+def timedelta_to_seconds(td):
+ return ( float(td.seconds)
+ + float(td.microseconds) / (10**6)
+ + td.days * 24 * 60 * 60 )
+
+def run_cmd(cmd, stdin=None, shell=False, verbose=False):
if options.verbose:
- print printable_cmd
+ if shell:
+ printable_cmd = cmd
+ else:
+ printable_cmd = ' '.join(cmd)
+ print 'CMD:', printable_cmd
if stdin:
stdin_arg = subprocess.PIPE
@@ -66,573 +195,1115 @@ def run_cmd(cmd, stdin=None, shell=False):
shell=shell)
stdout,stderr = p.communicate(input=stdin)
- if options.verbose:
+ if verbose:
if (stdout):
print "STDOUT: [[[\n%s]]]" % ''.join(stdout)
if (stderr):
print "STDERR: [[[\n%s]]]" % ''.join(stderr)
- return stdout,stderr
+ return stdout, stderr
+
+
+_next_unique_basename_count = 0
+
+def next_unique_basename(prefix):
+ global _next_unique_basename_count
+ _next_unique_basename_count += 1
+ return '_'.join((prefix, str(_next_unique_basename_count)))
+
+
+si_units = [
+ (1000 ** 5, 'P'),
+ (1000 ** 4, 'T'),
+ (1000 ** 3, 'G'),
+ (1000 ** 2, 'M'),
+ (1000 ** 1, 'K'),
+ (1000 ** 0, ''),
+ ]
+def n_label(n):
+ """(stolen from hurry.filesize)"""
+ for factor, suffix in si_units:
+ if n >= factor:
+ break
+ amount = int(n/factor)
+ if isinstance(suffix, tuple):
+ singular, multiple = suffix
+ if amount == 1:
+ suffix = singular
+ else:
+ suffix = multiple
+ return str(amount) + suffix
+
+
+def split_arg_once(l_r, sep):
+ if not l_r:
+ return (None, None)
+ if sep in l_r:
+ l, r = l_r.split(sep)
+ else:
+ l = l_r
+ r = None
+ if not l:
+ l = None
+ if not r:
+ r = None
+ return (l, r)
+
+RUN_KIND_SEPARATORS=('@', ',', 'x')
+
+class RunKind:
+ def __init__(self, b_r_l_s):
+ b_r, l_s = split_arg_once(b_r_l_s, RUN_KIND_SEPARATORS[1])
+ self.branch, self.revision = split_arg_once(b_r, RUN_KIND_SEPARATORS[0])
+ self.levels, self.spread = split_arg_once(l_s, RUN_KIND_SEPARATORS[2])
+ if self.levels: self.levels = int(self.levels)
+ if self.spread: self.spread = int(self.spread)
+
+ def label(self):
+ label_parts = []
+ if self.branch:
+ label_parts.append(self.branch)
+ if self.revision:
+ label_parts.append(RUN_KIND_SEPARATORS[0])
+ label_parts.append(self.revision)
+ if self.levels or self.spread:
+ label_parts.append(RUN_KIND_SEPARATORS[1])
+ if self.levels:
+ label_parts.append(str(self.levels))
+ if self.spread:
+ label_parts.append(RUN_KIND_SEPARATORS[2])
+ label_parts.append(str(self.spread))
+ return ''.join(label_parts)
+
+ def args(self):
+ return (self.branch, self.revision, self.levels, self.spread)
+
+
+def parse_timings_selections(db, *args):
+ run_kinds = []
+
+ for arg in args:
+ run_kind = RunKind(arg)
+
+ if run_kind.revision == 'each':
+ run_kind.revision = None
+ query = TimingQuery(db, run_kind)
+ for revision in query.get_sorted_revisions():
+ revision_run_kind = copy(run_kind)
+ revision_run_kind.revision = revision
+ run_kinds.append(revision_run_kind)
+ elif run_kind.revision and run_kind.revision.startswith('last'):
+ Nstr = run_kind.revision[4:]
+ if not Nstr:
+ N = 10
+ else:
+ N = int(Nstr)
+ run_kind.revision = None
+ query = TimingQuery(db, run_kind)
+ for revision in query.get_sorted_revisions()[-N:]:
+ revision_run_kind = copy(run_kind)
+ revision_run_kind.revision = revision
+ run_kinds.append(revision_run_kind)
+ else:
+ run_kinds.append(run_kind)
-def timedelta_to_seconds(td):
- return ( float(td.seconds)
- + float(td.microseconds) / (10**6)
- + td.days * 24 * 60 * 60 )
+ return run_kinds
+def parse_one_timing_selection(db, *args):
+ run_kinds = parse_timings_selections(db, *args)
+ if len(run_kinds) != 1:
+ bail("I need exactly one timings identifier, not '%s'"
+ % (' '.join(*args)))
+ return run_kinds[0]
-class Timings:
- def __init__(self, *ignore_svn_cmds):
- self.timings = {}
- self.current_name = None
+
+
+PATHNAME_VALID_CHARS = "-_.,@%s%s" % (string.ascii_letters, string.digits)
+def filesystem_safe_string(s):
+ return ''.join(c for c in s if c in PATHNAME_VALID_CHARS)
+
+def do_div(ref, val):
+ if ref:
+ return float(val) / float(ref)
+ else:
+ return 0.0
+
+def do_diff(ref, val):
+ return float(val) - float(ref)
+
+
+# ------------------------- database -------------------------
+
+class TimingsDb:
+ def __init__(self, db_path):
+ self.db_path = db_path;
+ self.conn = sqlite3.connect(db_path)
+ self.ensure_tables_created()
+
+ def ensure_tables_created(self):
+ c = self.conn.cursor()
+
+ c.execute("""SELECT name FROM sqlite_master WHERE type='table' AND
+ name='batch'""")
+ if c.fetchone():
+ # exists
+ return
+
+ print 'Creating database tables.'
+ c.executescript('''
+ CREATE TABLE batch (
+ batch_id INTEGER PRIMARY KEY AUTOINCREMENT,
+ started TEXT,
+ ended TEXT
+ );
+
+ CREATE TABLE run_kind (
+ run_kind_id INTEGER PRIMARY KEY AUTOINCREMENT,
+ branch TEXT NOT NULL,
+ revision TEXT NOT NULL,
+ wc_levels INTEGER,
+ wc_spread INTEGER,
+ UNIQUE(branch, revision, wc_levels, wc_spread)
+ );
+
+ CREATE TABLE run (
+ run_id INTEGER PRIMARY KEY AUTOINCREMENT,
+ batch_id INTEGER NOT NULL REFERENCES batch(batch_id),
+ run_kind_id INTEGER NOT NULL REFERENCES run_kind(run_kind_id),
+ started TEXT,
+ ended TEXT,
+ aborted INTEGER
+ );
+
+ CREATE TABLE timings (
+ run_id INTEGER NOT NULL REFERENCES run(run_id),
+ command TEXT NOT NULL,
+ sequence INTEGER,
+ timing REAL
+ );'''
+ )
+ self.conn.commit()
+ c.close();
+
+
+class Batch:
+ def __init__(self, db):
+ self.db = db
+ self.started = time_str()
+ c = db.conn.cursor()
+ c.execute("INSERT INTO batch (started) values (?)", (self.started,))
+ db.conn.commit()
+ self.id = c.lastrowid
+ c.close()
+
+ def done(self):
+ conn = self.db.conn
+ c = conn.cursor()
+ c.execute("""
+ UPDATE batch
+ SET ended = ?
+ WHERE batch_id = ?""",
+ (time_str(), self.id))
+ conn.commit()
+ c.close()
+
+class Run:
+ def __init__(self, batch, run_kind):
+ self.batch = batch
+ conn = self.batch.db.conn
+ c = conn.cursor()
+
+ c.execute("""
+ SELECT run_kind_id FROM run_kind
+ WHERE branch = ?
+ AND revision = ?
+ AND wc_levels = ?
+ AND wc_spread = ?""",
+ run_kind.args())
+ kind_ids = c.fetchone()
+ if kind_ids:
+ kind_id = kind_ids[0]
+ else:
+ c.execute("""
+ INSERT INTO run_kind (branch, revision, wc_levels, wc_spread)
+ VALUES (?, ?, ?, ?)""",
+ run_kind.args())
+ conn.commit()
+ kind_id = c.lastrowid
+
+ self.started = time_str()
+
+ c.execute("""
+ INSERT INTO run
+ (batch_id, run_kind_id, started)
+ VALUES
+ (?, ?, ?)""",
+ (self.batch.id, kind_id, self.started))
+ conn.commit()
+ self.id = c.lastrowid
+ c.close();
self.tic_at = None
- self.ignore = ignore_svn_cmds
- self.name = None
+ self.current_command = None
+ self.timings = []
- def tic(self, name):
- if name in self.ignore:
+ def tic(self, command):
+ if command in IGNORE_COMMANDS:
return
self.toc()
- self.current_name = name
+ self.current_command = command
self.tic_at = datetime.datetime.now()
def toc(self):
- if self.current_name and self.tic_at:
+ if self.current_command and self.tic_at:
toc_at = datetime.datetime.now()
- self.submit_timing(self.current_name,
+ self.remember_timing(self.current_command,
timedelta_to_seconds(toc_at - self.tic_at))
- self.current_name = None
+ self.current_command = None
self.tic_at = None
- def submit_timing(self, name, seconds):
- times = self.timings.get(name)
- if not times:
- times = []
- self.timings[name] = times
- times.append(seconds)
+ def remember_timing(self, command, seconds):
+ self.timings.append((command, seconds))
+
+ def submit_timings(self):
+ conn = self.batch.db.conn
+ c = conn.cursor()
+ print 'submitting...'
+
+ c.executemany("""
+ INSERT INTO timings
+ (run_id, command, sequence, timing)
+ VALUES
+ (?, ?, ?, ?)""",
+ [(self.id, t[0], (i + 1), t[1]) for i,t in enumerate(self.timings)])
+
+ conn.commit()
+ c.close()
+
+ def done(self, aborted=False):
+ conn = self.batch.db.conn
+ c = conn.cursor()
+ c.execute("""
+ UPDATE run
+ SET ended = ?, aborted = ?
+ WHERE run_id = ?""",
+ (time_str(), aborted, self.id))
+ conn.commit()
+ c.close()
+
+
+class TimingQuery:
+ def __init__(self, db, run_kind):
+ self.cursor = db.conn.cursor()
+ self.constraints = []
+ self.values = []
+ self.timings = None
+ self.FROM_WHERE = """
+ FROM batch AS b,
+ timings AS t,
+ run AS r,
+ run_kind as k
+ WHERE
+ t.run_id = r.run_id
+ AND k.run_kind_id = r.run_kind_id
+ AND b.batch_id = r.batch_id
+ AND r.aborted = 0
+ """
+ self.append_constraint('k.branch', run_kind.branch)
+ self.each_revision = False
+ if run_kind.revision == 'each':
+ self.each_revision = True
+ else:
+ self.append_constraint('k.revision', run_kind.revision)
+ self.append_constraint('k.wc_levels', run_kind.levels)
+ self.append_constraint('k.wc_spread', run_kind.spread)
+ self.label = run_kind.label()
+
+ def append_constraint(self, column_name, val):
+ if val:
+ self.constraints.append('AND %s = ?' % column_name)
+ self.values.append(val)
+
+ def remove_last_constraint(self):
+ del self.constraints[-1]
+ del self.values[-1]
+
+ def get_sorted_X(self, x, n=1):
+ query = ['SELECT DISTINCT %s' % x,
+ self.FROM_WHERE ]
+ query.extend(self.constraints)
+ query.append('ORDER BY %s' % x)
+ c = db.conn.cursor()
+ try:
+ c.execute(' '.join(query), self.values)
+ if n == 1:
+ return [tpl[0] for tpl in c.fetchall()]
+ else:
+ return c.fetchall()
+ finally:
+ c.close()
+
+ def get_sorted_command_names(self):
+ return self.get_sorted_X('t.command')
- def min_max_avg(self, name):
- ttimings = self.timings.get(name)
- return ( min(ttimings),
- max(ttimings),
- reduce(lambda x,y: x + y, ttimings) / len(ttimings) )
+ def get_sorted_branches(self):
+ return self.get_sorted_X('k.branch')
- def summary(self):
- s = []
- if self.name:
- s.append('Timings for %s' % self.name)
- s.append(' N min max avg operation (unit is seconds)')
+ def get_sorted_revisions(self):
+ return self.get_sorted_X('k.revision')
+
+ def get_sorted_levels_spread(self):
+ return self.get_sorted_X('k.wc_levels,k.wc_spread', n = 2)
+
+ def count_runs_batches(self):
+ query = ["""SELECT
+ count(DISTINCT r.run_id),
+ count(DISTINCT b.batch_id)""",
+ self.FROM_WHERE ]
+ query.extend(self.constraints)
+ c = db.conn.cursor()
+ try:
+ #print ' '.join(query)
+ c.execute(' '.join(query), self.values)
+ return c.fetchone()
+ finally:
+ c.close()
+
+ def get_command_timings(self, command):
+ query = ["""SELECT
+ count(t.timing),
+ min(t.timing),
+ max(t.timing),
+ avg(t.timing)""",
+ self.FROM_WHERE ]
+ self.append_constraint('t.command', command)
+ try:
+ query.extend(self.constraints)
+ c = db.conn.cursor()
+ try:
+ c.execute(' '.join(query), self.values)
+ return c.fetchone()
+ finally:
+ c.close()
+ finally:
+ self.remove_last_constraint()
- names = sorted(self.timings.keys())
+ def get_timings(self):
+ if self.timings:
+ return self.timings
+ self.timings = {}
+ for command_name in self.get_sorted_command_names():
+ self.timings[command_name] = self.get_command_timings(command_name)
+ return self.timings
- for name in names:
- timings = self.timings.get(name)
- if not name or not timings: continue
- tmin, tmax, tavg = self.min_max_avg(name)
+# ------------------------------------------------------------ run tests
- s.append('%5d %7.2f %7.2f %7.2f %s' % (
- len(timings),
- tmin,
- tmax,
- tavg,
- name))
- return '\n'.join(s)
+def perform_run(batch, run_kind,
+ svn_bin, svnadmin_bin, verbose):
+ run = Run(batch, run_kind)
- def compare_to(self, other):
- def do_div(a, b):
- if b:
- return float(a) / float(b)
- else:
- return 0.0
+ def create_tree(in_dir, _levels, _spread):
+ try:
+ os.mkdir(in_dir)
+ except:
+ pass
+
+ for i in range(_spread):
+ # files
+ fn = j(in_dir, next_unique_basename('file'))
+ f = open(fn, 'w')
+ f.write('This is %s\n' % fn)
+ f.close()
+
+ # dirs
+ if (_levels > 1):
+ dn = j(in_dir, next_unique_basename('dir'))
+ create_tree(dn, _levels - 1, _spread)
+
+ def svn(*args):
+ name = args[0]
+
+ cmd = [ svn_bin ]
+ cmd.extend( list(args) )
+ if verbose:
+ print 'svn cmd:', ' '.join(cmd)
+
+ stdin = None
+ if stdin:
+ stdin_arg = subprocess.PIPE
+ else:
+ stdin_arg = None
+
+ run.tic(name)
+ try:
+ p = subprocess.Popen(cmd,
+ stdin=stdin_arg,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=False)
+ stdout,stderr = p.communicate(input=stdin)
+ except OSError:
+ stdout = stderr = None
+ finally:
+ run.toc()
- def do_diff(a, b):
- return float(a) - float(b)
+ if verbose:
+ if (stdout):
+ print "STDOUT: [[[\n%s]]]" % ''.join(stdout)
+ if (stderr):
+ print "STDERR: [[[\n%s]]]" % ''.join(stderr)
- selfname = self.name
- if not selfname:
- selfname = 'unnamed'
- othername = other.name
- if not othername:
- othername = 'the other'
+ return stdout,stderr
- selftotal = self.min_max_avg(TOTAL_RUN)[2]
- othertotal = other.min_max_avg(TOTAL_RUN)[2]
- s = ['COMPARE %s to %s' % (othername, selfname)]
+ def add(*args):
+ return svn('add', *args)
- if TOTAL_RUN in self.timings and TOTAL_RUN in other.timings:
- s.append(' %s times: %5.1f seconds avg for %s' % (TOTAL_RUN,
- othertotal, othername))
- s.append(' %s %5.1f seconds avg for %s' % (' ' * len(TOTAL_RUN),
- selftotal, selfname))
+ def ci(*args):
+ return svn('commit', '-mm', *args)
+ def up(*args):
+ return svn('update', *args)
- s.append(' min max avg operation')
+ def st(*args):
+ return svn('status', *args)
- names = sorted(self.timings.keys())
+ def info(*args):
+ return svn('info', *args)
- for name in names:
- if not name in other.timings:
- continue
+ _chars = [chr(x) for x in range(ord('a'), ord('z') +1)]
+ def randstr(len=8):
+ return ''.join( [random.choice(_chars) for i in range(len)] )
- min_me, max_me, avg_me = self.min_max_avg(name)
- min_other, max_other, avg_other = other.min_max_avg(name)
+ def _copy(path):
+ dest = next_unique_basename(path + '_copied')
+ svn('copy', path, dest)
- s.append('%-16s %-16s %-16s %s' % (
- '%7.2f|%+7.3f' % (
- do_div(min_me, min_other),
- do_diff(min_me, min_other)
- ),
+ def _move(path):
+ dest = path + '_moved'
+ svn('move', path, dest)
- '%7.2f|%+7.3f' % (
- do_div(max_me, max_other),
- do_diff(max_me, max_other)
- ),
+ def _propmod(path):
+ so, se = svn('proplist', path)
+ propnames = [line.strip() for line in so.strip().split('\n')[1:]]
- '%7.2f|%+7.3f' % (
- do_div(avg_me, avg_other),
- do_diff(avg_me, avg_other)
- ),
+ # modify?
+ if len(propnames):
+ svn('ps', propnames[len(propnames) / 2], randstr(), path)
- name))
+ # del?
+ if len(propnames) > 1:
+ svn('propdel', propnames[len(propnames) / 2], path)
- s.extend([
- '("1.23|+0.45" means factor=1.23, difference in seconds = 0.45',
- 'factor < 1 or difference < 0 means \'%s\' is faster than \'%s\')'
- % (self.name, othername)])
+ def _propadd(path):
+ # set a new one.
+ svn('propset', randstr(), randstr(), path)
+
+ def _mod(path):
+ if os.path.isdir(path):
+ _propmod(path)
+ return
+
+ f = open(path, 'a')
+ f.write('\n%s\n' % randstr())
+ f.close()
- return '\n'.join(s)
+ def _add(path):
+ if os.path.isfile(path):
+ return _mod(path)
+
+ if random.choice((True, False)):
+ # create a dir
+ svn('mkdir', j(path, next_unique_basename('new_dir')))
+ else:
+ # create a file
+ new_path = j(path, next_unique_basename('new_file'))
+ f = open(new_path, 'w')
+ f.write(randstr())
+ f.close()
+ svn('add', new_path)
+
+ def _del(path):
+ svn('delete', path)
+
+ _mod_funcs = (_mod, _add, _propmod, _propadd, )#_copy,) # _move, _del)
+
+ def modify_tree(in_dir, fraction):
+ child_names = os.listdir(in_dir)
+ for child_name in child_names:
+ if child_name[0] == '.':
+ continue
+ if random.random() < fraction:
+ path = j(in_dir, child_name)
+ random.choice(_mod_funcs)(path)
+ for child_name in child_names:
+ if child_name[0] == '.': continue
+ path = j(in_dir, child_name)
+ if os.path.isdir(path):
+ modify_tree(path, fraction)
- def add(self, other):
- for name, other_times in other.timings.items():
- my_times = self.timings.get(name)
- if not my_times:
- my_times = []
- self.timings[name] = my_times
- my_times.extend(other_times)
+ def propadd_tree(in_dir, fraction):
+ for child_name in os.listdir(in_dir):
+ if child_name[0] == '.': continue
+ path = j(in_dir, child_name)
+ if random.random() < fraction:
+ _propadd(path)
+ if os.path.isdir(path):
+ propadd_tree(path, fraction)
+ def rmtree_onerror(func, path, exc_info):
+ """Error handler for ``shutil.rmtree``.
+ If the error is due to an access error (read only file)
+ it attempts to add write permission and then retries.
-j = os.path.join
+ If the error is for another reason it re-raises the error.
+
+ Usage : ``shutil.rmtree(path, onerror=onerror)``
+ """
+ if not os.access(path, os.W_OK):
+ # Is the error an access error ?
+ os.chmod(path, stat.S_IWUSR)
+ func(path)
+ else:
+ raise
-_create_count = 0
+ base = tempfile.mkdtemp()
-def next_name(prefix):
- global _create_count
- _create_count += 1
- return '_'.join((prefix, str(_create_count)))
+ # ensure identical modifications for every run
+ random.seed(0)
+
+ aborted = True
-def create_tree(in_dir, levels, spread=5):
try:
- os.mkdir(in_dir)
- except:
- pass
-
- for i in range(spread):
- # files
- fn = j(in_dir, next_name('file'))
- f = open(fn, 'w')
- f.write('This is %s\n' % fn)
- f.close()
+ repos = j(base, 'repos')
+ repos = repos.replace('\\', '/')
+ wc = j(base, 'wc')
+ wc2 = j(base, 'wc2')
- # dirs
- if (levels > 1):
- dn = j(in_dir, next_name('dir'))
- create_tree(dn, levels - 1, spread)
+ if repos.startswith('/'):
+ file_url = 'file://%s' % repos
+ else:
+ file_url = 'file:///%s' % repos
+ print '\nRunning svn benchmark in', base
+ print 'dir levels: %s; new files and dirs per leaf: %s' %(
+ run_kind.levels, run_kind.spread)
-def svn(*args):
- name = args[0]
+ started = datetime.datetime.now()
- ### options comes from the global namespace; it should be passed
- cmd = [options.svn] + list(args)
- if options.verbose:
- print 'svn cmd:', ' '.join(cmd)
+ try:
+ run_cmd([svnadmin_bin, 'create', repos])
+ svn('checkout', file_url, wc)
+
+ trunk = j(wc, 'trunk')
+ create_tree(trunk, run_kind.levels, run_kind.spread)
+ add(trunk)
+ st(wc)
+ ci(wc)
+ up(wc)
+ propadd_tree(trunk, 0.05)
+ ci(wc)
+ up(wc)
+ st(wc)
+ info('-R', wc)
+
+ trunk_url = file_url + '/trunk'
+ branch_url = file_url + '/branch'
+
+ svn('copy', '-mm', trunk_url, branch_url)
+ st(wc)
+
+ up(wc)
+ st(wc)
+ info('-R', wc)
+
+ svn('checkout', trunk_url, wc2)
+ st(wc2)
+ modify_tree(wc2, 0.5)
+ st(wc2)
+ ci(wc2)
+ up(wc2)
+ up(wc)
+
+ svn('switch', branch_url, wc2)
+ modify_tree(wc2, 0.5)
+ st(wc2)
+ info('-R', wc2)
+ ci(wc2)
+ up(wc2)
+ up(wc)
+
+ modify_tree(trunk, 0.5)
+ st(wc)
+ ci(wc)
+ up(wc2)
+ up(wc)
+
+ svn('merge', '--accept=postpone', trunk_url, wc2)
+ st(wc2)
+ info('-R', wc2)
+ svn('resolve', '--accept=mine-conflict', wc2)
+ st(wc2)
+ svn('resolved', '-R', wc2)
+ st(wc2)
+ info('-R', wc2)
+ ci(wc2)
+ up(wc2)
+ up(wc)
+
+ svn('merge', '--accept=postpone', '--reintegrate', branch_url, trunk)
+ st(wc)
+ svn('resolve', '--accept=mine-conflict', wc)
+ st(wc)
+ svn('resolved', '-R', wc)
+ st(wc)
+ ci(wc)
+ up(wc2)
+ up(wc)
+
+ svn('delete', j(wc, 'branch'))
+ ci(wc)
+ up(wc)
+
+ aborted = False
- stdin = None
- if stdin:
- stdin_arg = subprocess.PIPE
- else:
- stdin_arg = None
+ finally:
+ stopped = datetime.datetime.now()
+ print '\nDone with svn benchmark in', (stopped - started)
- ### timings comes from the global namespace; it should be passed
- timings.tic(name)
- try:
- p = subprocess.Popen(cmd,
- stdin=stdin_arg,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=False)
- stdout,stderr = p.communicate(input=stdin)
- except OSError:
- stdout = stderr = None
+ run.remember_timing(TOTAL_RUN,
+ timedelta_to_seconds(stopped - started))
finally:
- timings.toc()
+ run.done(aborted)
+ run.submit_timings()
+ shutil.rmtree(base, onerror=rmtree_onerror)
- if options.verbose:
- if (stdout):
- print "STDOUT: [[[\n%s]]]" % ''.join(stdout)
- if (stderr):
- print "STDERR: [[[\n%s]]]" % ''.join(stderr)
+ return aborted
+
+
+# ---------------------------------------------------------------------
- return stdout,stderr
+def cmdline_run(db, options, run_kind_str, N=1):
+ run_kind = parse_one_timing_selection(db, run_kind_str)
-def add(*args):
- return svn('add', *args)
+ N = int(N)
-def ci(*args):
- return svn('commit', '-mm', *args)
+ print 'Hi, going to run a Subversion benchmark series of %d runs...' % N
+ print 'Label is %s' % run_kind.label()
-def up(*args):
- return svn('update', *args)
+ # can we run the svn binaries?
+ svn_bin = j(options.svn_bin_dir, 'svn')
+ svnadmin_bin = j(options.svn_bin_dir, 'svnadmin')
-def st(*args):
- return svn('status', *args)
+ for b in (svn_bin, svnadmin_bin):
+ so,se = run_cmd([b, '--version'])
+ if not so:
+ bail("Can't run %s" % b)
-_chars = [chr(x) for x in range(ord('a'), ord('z') +1)]
+ print ', '.join([s.strip() for s in so.split('\n')[:2]])
-def randstr(len=8):
- return ''.join( [random.choice(_chars) for i in range(len)] )
+ batch = Batch(db)
-def _copy(path):
- dest = next_name(path + '_copied')
- svn('copy', path, dest)
+ for i in range(N):
+ print 'Run %d of %d' % (i + 1, N)
+ perform_run(batch, run_kind,
+ svn_bin, svnadmin_bin, options.verbose)
-def _move(path):
- dest = path + '_moved'
- svn('move', path, dest)
+ batch.done()
-def _propmod(path):
- so, se = svn('proplist', path)
- propnames = [line.strip() for line in so.strip().split('\n')[1:]]
- # modify?
- if len(propnames):
- svn('ps', propnames[len(propnames) / 2], randstr(), path)
+def cmdline_list(db, options, *args):
+ run_kinds = parse_timings_selections(db, *args)
- # del?
- if len(propnames) > 1:
- svn('propdel', propnames[len(propnames) / 2], path)
+ for run_kind in run_kinds:
+ constraints = []
+ def add_if_not_none(name, val):
+ if val:
+ constraints.append(' %s = %s' % (name, val))
+ add_if_not_none('branch', run_kind.branch)
+ add_if_not_none('revision', run_kind.revision)
+ add_if_not_none('levels', run_kind.levels)
+ add_if_not_none('spread', run_kind.spread)
+ if constraints:
+ print 'For\n', '\n'.join(constraints)
+ print 'I found:'
-def _propadd(path):
- # set a new one.
- svn('propset', randstr(), randstr(), path)
+ d = TimingQuery(db, run_kind)
+ cmd_names = d.get_sorted_command_names()
+ if cmd_names:
+ print '\n%d command names:\n ' % len(cmd_names), '\n '.join(cmd_names)
-def _mod(path):
- if os.path.isdir(path):
- return _propmod(path)
+ branches = d.get_sorted_branches()
+ if branches and (len(branches) > 1 or branches[0] != run_kind.branch):
+ print '\n%d branches:\n ' % len(branches), '\n '.join(branches)
- f = open(path, 'a')
- f.write('\n%s\n' % randstr())
- f.close()
+ revisions = d.get_sorted_revisions()
+ if revisions and (len(revisions) > 1 or revisions[0] != run_kind.revision):
+ print '\n%d revisions:\n ' % len(revisions), '\n '.join(revisions)
-def _add(path):
- if os.path.isfile(path):
- return _mod(path)
+ levels_spread = d.get_sorted_levels_spread()
+ if levels_spread and (
+ len(levels_spread) > 1
+ or levels_spread[0] != (run_kind.levels, run_kind.spread)):
+ print '\n%d kinds of levels x spread:\n ' % len(levels_spread), '\n '.join(
+ [ ('%dx%d' % (l, s)) for l,s in levels_spread ])
- if random.choice((True, False)):
- # create a dir
- svn('mkdir', j(path, next_name('new_dir')))
- else:
- # create a file
- new_path = j(path, next_name('new_file'))
- f = open(new_path, 'w')
- f.write(randstr())
- f.close()
- svn('add', new_path)
+ print "\n%d runs in %d batches.\n" % (d.count_runs_batches())
-def _del(path):
- svn('delete', path)
-_mod_funcs = (_mod, _add, _propmod, _propadd, )#_copy,) # _move, _del)
+def cmdline_show(db, options, *run_kind_strings):
+ run_kinds = parse_timings_selections(db, *run_kind_strings)
+ for run_kind in run_kinds:
+ q = TimingQuery(db, run_kind)
+ timings = q.get_timings()
-def modify_tree(in_dir, fraction):
- child_names = os.listdir(in_dir)
- for child_name in child_names:
- if child_name[0] == '.':
- continue
- if random.random() < fraction:
- path = j(in_dir, child_name)
- random.choice(_mod_funcs)(path)
+ s = []
+ s.append('Timings for %s' % run_kind.label())
+ s.append(' N min max avg operation (unit is seconds)')
- for child_name in child_names:
- if child_name[0] == '.': continue
- path = j(in_dir, child_name)
- if os.path.isdir(path):
- modify_tree(path, fraction)
-
-def propadd_tree(in_dir, fraction):
- for child_name in os.listdir(in_dir):
- if child_name[0] == '.': continue
- path = j(in_dir, child_name)
- if random.random() < fraction:
- _propadd(path)
- if os.path.isdir(path):
- propadd_tree(path, fraction)
+ for command_name in q.get_sorted_command_names():
+ if options.command_names and command_name not in options.command_names:
+ continue
+ n, tmin, tmax, tavg = timings[command_name]
+ s.append('%4s %7.2f %7.2f %7.2f %s' % (
+ n_label(n),
+ tmin,
+ tmax,
+ tavg,
+ command_name))
-def rmtree_onerror(func, path, exc_info):
- """Error handler for ``shutil.rmtree``.
+ print '\n'.join(s)
- If the error is due to an access error (read only file)
- it attempts to add write permission and then retries.
- If the error is for another reason it re-raises the error.
+def cmdline_compare(db, options, *args):
+ run_kinds = parse_timings_selections(db, *args)
+ if len(run_kinds) < 2:
+ bail("Need at least two sets of timings to compare.")
- Usage : ``shutil.rmtree(path, onerror=onerror)``
- """
- if not os.access(path, os.W_OK):
- # Is the error an access error ?
- os.chmod(path, stat.S_IWUSR)
- func(path)
- else:
- raise
+ left_kind = run_kinds[0]
+ leftq = TimingQuery(db, left_kind)
+ left = leftq.get_timings()
+ if not left:
+ bail("No timings for %s" % left_kind.label())
-def run(levels, spread, N):
- for i in range(N):
- base = tempfile.mkdtemp()
+ for run_kind_idx in range(1, len(run_kinds)):
+ right_kind = run_kinds[run_kind_idx]
- # ensure identical modifications for every run
- random.seed(0)
+ rightq = TimingQuery(db, right_kind)
+ right = rightq.get_timings()
+ if not right:
+ print "No timings for %s" % right_kind.label()
+ continue
- try:
- repos = j(base, 'repos')
- repos = repos.replace('\\', '/')
- wc = j(base, 'wc')
- wc2 = j(base, 'wc2')
+ label = 'Compare %s to %s' % (right_kind.label(), left_kind.label())
- if repos.startswith('/'):
- file_url = 'file://%s' % repos
- else:
- file_url = 'file:///%s' % repos
+ s = [label]
- so, se = svn('--version')
- if not so:
- print "Can't find svn."
- exit(1)
- version = ', '.join([s.strip() for s in so.split('\n')[:2]])
+ verbose = options.verbose
+ if not verbose:
+ s.append(' N avg operation')
+ else:
+ s.append(' N min max avg operation')
- print '\nRunning svn benchmark in', base
- print 'dir levels: %s; new files and dirs per leaf: %s; run %d of %d' %(
- levels, spread, i + 1, N)
+ command_names = [name for name in leftq.get_sorted_command_names()
+ if name in right]
+ if options.command_names:
+ command_names = [name for name in command_names
+ if name in options.command_names]
- print version
- started = datetime.datetime.now()
+ for command_name in command_names:
+ left_N, left_min, left_max, left_avg = left[command_name]
+ right_N, right_min, right_max, right_avg = right[command_name]
- try:
- run_cmd(['svnadmin', 'create', repos])
- svn('checkout', file_url, wc)
-
- trunk = j(wc, 'trunk')
- create_tree(trunk, levels, spread)
- add(trunk)
- st(wc)
- ci(wc)
- up(wc)
- propadd_tree(trunk, 0.5)
- ci(wc)
- up(wc)
- st(wc)
-
- trunk_url = file_url + '/trunk'
- branch_url = file_url + '/branch'
-
- svn('copy', '-mm', trunk_url, branch_url)
- st(wc)
-
- up(wc)
- st(wc)
-
- svn('checkout', trunk_url, wc2)
- st(wc2)
- modify_tree(wc2, 0.5)
- st(wc2)
- ci(wc2)
- up(wc2)
- up(wc)
-
- svn('switch', branch_url, wc2)
- modify_tree(wc2, 0.5)
- st(wc2)
- ci(wc2)
- up(wc2)
- up(wc)
-
- modify_tree(trunk, 0.5)
- st(wc)
- ci(wc)
- up(wc2)
- up(wc)
-
- svn('merge', '--accept=postpone', trunk_url, wc2)
- st(wc2)
- svn('resolve', '--accept=mine-conflict', wc2)
- st(wc2)
- svn('resolved', '-R', wc2)
- st(wc2)
- ci(wc2)
- up(wc2)
- up(wc)
-
- svn('merge', '--accept=postpone', '--reintegrate', branch_url, trunk)
- st(wc)
- svn('resolve', '--accept=mine-conflict', wc)
- st(wc)
- svn('resolved', '-R', wc)
- st(wc)
- ci(wc)
- up(wc2)
- up(wc)
-
- svn('delete', j(wc, 'branch'))
- ci(wc)
- up(wc2)
- up(wc)
+ N_str = '%s/%s' % (n_label(left_N), n_label(right_N))
+ avg_str = '%7.2f|%+7.3f' % (do_div(left_avg, right_avg),
+ do_diff(left_avg, right_avg))
+
+ if not verbose:
+ s.append('%9s %-16s %s' % (N_str, avg_str, command_name))
+ else:
+ min_str = '%7.2f|%+7.3f' % (do_div(left_min, right_min),
+ do_diff(left_min, right_min))
+ max_str = '%7.2f|%+7.3f' % (do_div(left_max, right_max),
+ do_diff(left_max, right_max))
+ s.append('%9s %-16s %-16s %-16s %s' % (N_str, min_str, max_str, avg_str,
+ command_name))
- finally:
- stopped = datetime.datetime.now()
- print '\nDone with svn benchmark in', (stopped - started)
-
- ### timings comes from the global namespace; it should be passed
- timings.submit_timing(TOTAL_RUN,
- timedelta_to_seconds(stopped - started))
-
- # rename ps to prop mod
- if timings.timings.get('ps'):
- has = timings.timings.get('prop mod')
- if not has:
- has = []
- timings.timings['prop mod'] = has
- has.extend( timings.timings['ps'] )
- del timings.timings['ps']
-
- print timings.summary()
- finally:
- shutil.rmtree(base, onerror=rmtree_onerror)
+ s.extend([
+ '(legend: "1.23|+0.45" means: slower by factor 1.23 and by 0.45 seconds;',
+ ' factor < 1 and seconds < 0 means \'%s\' is faster.'
+ % right_kind.label(),
+ ' "2/3" means: \'%s\' has 2 timings on record, the other has 3.)'
+ % left_kind.label()
+ ])
-def read_from_file(file_path):
- f = open(file_path, 'rb')
- try:
- instance = cPickle.load(f)
- instance.name = os.path.basename(file_path)
- finally:
- f.close()
- return instance
+ print '\n'.join(s)
-def write_to_file(file_path, instance):
- f = open(file_path, 'wb')
- cPickle.dump(instance, f)
- f.close()
+# ------------------------------------------------------- charts
-def cmd_compare(path1, path2):
- t1 = read_from_file(path1)
- t2 = read_from_file(path2)
+def cmdline_chart_compare(db, options, *args):
+ import matplotlib
+ matplotlib.use('Agg')
+ import numpy as np
+ import matplotlib.pylab as plt
- print t1.summary()
- print '---'
- print t2.summary()
- print '---'
- print t2.compare_to(t1)
+ labels = []
+ timing_sets = []
+ command_names = None
-def cmd_combine(dest, *paths):
- total = Timings('--version');
+ run_kinds = parse_timings_selections(db, *args)
- for path in paths:
- t = read_from_file(path)
- total.add(t)
+ # iterate the timings selections and accumulate data
+ for run_kind in run_kinds:
+ query = TimingQuery(db, run_kind)
+ timings = query.get_timings()
+ if not timings:
+ print "No timings for %s" % run_kind.label()
+ continue
+ labels.append(run_kind.label())
+ timing_sets.append(timings)
- print total.summary()
- write_to_file(dest, total)
+ # it only makes sense to compare those commands that have timings
+ # in the first selection, because that is the one everything else
+ # is compared to. Remember the first selection's command names.
+ if not command_names:
+ command_names = query.get_sorted_command_names()
-def cmd_run(timings_path, levels, spread, N=1):
- levels = int(levels)
- spread = int(spread)
- N = int(N)
- print '\n\nHi, going to run a Subversion benchmark series of %d runs...' % N
+ if len(timing_sets) < 2:
+ bail("Not enough timings")
- ### UGH! should pass to run()
- global timings
+ if options.command_names:
+ command_names = [name for name in command_names
+ if name in options.command_names]
- if os.path.isfile(timings_path):
- print 'Going to add results to existing file', timings_path
- timings = read_from_file(timings_path)
- else:
- print 'Going to write results to new file', timings_path
- timings = Timings('--version')
+ chart_path = options.chart_path
+ if not chart_path:
+ chart_path = 'compare_' + '_'.join(
+ [ filesystem_safe_string(l) for l in labels ]
+ ) + '.svg'
- run(levels, spread, N)
+ N = len(command_names)
+ M = len(timing_sets) - 1
+ if M < 2:
+ M = 2
- write_to_file(timings_path, timings)
+ group_positions = np.arange(N) # the y locations for the groups
+ dist = 1. / (1. + M)
+ height = (1. - dist) / M # the height of the bars
-def cmd_show(*paths):
- for timings_path in paths:
- timings = read_from_file(timings_path)
- print '---\n%s' % timings_path
- print timings.summary()
+ fig = plt.figure(figsize=(12, 5 + 0.2*N*M))
+ plot1 = fig.add_subplot(121)
+ plot2 = fig.add_subplot(122)
+ left = timing_sets[0]
-def usage():
- print __doc__
+ # Iterate timing sets. Each loop produces one bar for each command name
+ # group.
+ for label_i,label in enumerate(labels[1:],1):
+ right = timing_sets[label_i]
+ if not right:
+ continue
+
+ for cmd_i, command_name in enumerate(command_names):
+ if command_name not in right:
+ #skip
+ continue
+
+ left_N, left_min, left_max, left_avg = left[command_name]
+ right_N, right_min, right_max, right_avg = right[command_name]
+
+ div_avg = 100. * (do_div(left_avg, right_avg) - 1.0)
+ if div_avg <= 0:
+ col = '#55dd55'
+ else:
+ col = '#dd5555'
+
+ diff_val = do_diff(left_avg, right_avg)
+
+ ofs = (dist + height) / 2. + height * (label_i - 1)
+
+ barheight = height * (1.0 - dist)
+
+ y = float(cmd_i) + ofs
+
+ plot1.barh((y, ),
+ (div_avg, ),
+ barheight,
+ color=col, edgecolor='white')
+ plot1.text(0., y + height/2.,
+ '%s %+5.1f%%' % (label, div_avg),
+ ha='right', va='center', size='small',
+ rotation=0, family='monospace')
+
+ plot2.barh((y, ),
+ (diff_val, ),
+ barheight,
+ color=col, edgecolor='white')
+ plot2.text(0., y + height/2.,
+ '%s %+6.2fs' % (label, diff_val),
+ ha='right', va='center', size='small',
+ rotation=0, family='monospace')
+
+
+ for p in (plot1, plot2):
+ xlim = list(p.get_xlim())
+ if xlim[1] < 10.:
+ xlim[1] = 10.
+ # make sure the zero line is far enough right so that the annotations
+ # fit inside the chart. About half the width should suffice.
+ if xlim[0] > -xlim[1]:
+ xlim[0] = -xlim[1]
+ p.set_xlim(*xlim)
+ p.set_xticks((0,))
+ p.set_yticks(group_positions + (height / 2.))
+ p.set_yticklabels(())
+ p.set_ylim((len(command_names), 0))
+ p.grid()
+
+ plot1.set_xticklabels(('+-0%',), rotation=0)
+ plot1.set_title('Average runtime change from %s in %%' % labels[0],
+ size='medium')
+
+ plot2.set_xticklabels(('+-0s',), rotation=0)
+ plot2.set_title('Average runtime change from %s in seconds' % labels[0],
+ size='medium')
+
+ margin = 1./(2 + N*M)
+ titlemargin = 0
+ if options.title:
+ titlemargin = margin * 1.5
+
+ fig.subplots_adjust(left=0.005, right=0.995, wspace=0.3, bottom=margin,
+ top=1.0-margin-titlemargin)
+
+ ystep = (1.0 - 2.*margin - titlemargin) / len(command_names)
+
+ for idx,command_name in enumerate(command_names):
+ ylabel = '%s\nvs. %.1fs' % (
+ command_name,
+ left[command_name][3])
+
+ ypos=1.0 - margin - titlemargin - ystep/M - ystep * idx
+ plt.figtext(0.5, ypos,
+ command_name,
+ ha='center', va='top',
+ size='medium', weight='bold')
+ plt.figtext(0.5, ypos - ystep/(M+1),
+ '%s\n= %.2fs' % (
+ labels[0], left[command_name][3]),
+ ha='center', va='top',
+ size='small')
+
+ if options.title:
+ plt.figtext(0.5, 1. - titlemargin/2, options.title, ha='center',
+ va='center', weight='bold')
+
+ plt.savefig(chart_path)
+ print 'wrote chart file:', chart_path
+
+
+# ------------------------------------------------------------ main
+
+
+# Custom option formatter, keeping newlines in the description.
+# adapted from:
+# http://groups.google.com/group/comp.lang.python/msg/09f28e26af0699b1
+import textwrap
+class IndentedHelpFormatterWithNL(optparse.IndentedHelpFormatter):
+ def format_description(self, description):
+ if not description: return ""
+ desc_width = self.width - self.current_indent
+ indent = " "*self.current_indent
+ bits = description.split('\n')
+ formatted_bits = [
+ textwrap.fill(bit,
+ desc_width,
+ initial_indent=indent,
+ subsequent_indent=indent)
+ for bit in bits]
+ result = "\n".join(formatted_bits) + "\n"
+ return result
if __name__ == '__main__':
- parser = optparse.OptionParser()
+ parser = optparse.OptionParser(formatter=IndentedHelpFormatterWithNL())
# -h is automatically added.
### should probably expand the help for that. and see about -?
parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
help='Verbose operation')
- parser.add_option('--svn', action='store', dest='svn', default='svn',
- help='Specify Subversion executable to use')
+ parser.add_option('-b', '--svn-bin-dir', action='store', dest='svn_bin_dir',
+ default='',
+ help='Specify directory to find Subversion binaries in')
+ parser.add_option('-f', '--db-path', action='store', dest='db_path',
+ default='benchmark.db',
+ help='Specify path to SQLite database file')
+ parser.add_option('-o', '--chart-path', action='store', dest='chart_path',
+ help='Supply a path for chart output.')
+ parser.add_option('-c', '--command-names', action='store',
+ dest='command_names',
+ help='Comma separated list of command names to limit to.')
+ parser.add_option('-t', '--title', action='store',
+ dest='title',
+ help='For charts, a title to print in the chart graphics.')
+
+ parser.set_description(__doc__)
+ parser.set_usage('')
- ### should start passing this, but for now: make it global
- global options
options, args = parser.parse_args()
+ def usage(msg=None):
+ parser.print_help()
+ if msg:
+ print
+ print msg
+ bail()
+
# there should be at least one arg left: the sub-command
if not args:
- usage()
- exit(1)
+ usage('No command argument supplied.')
cmd = args[0]
del args[0]
- if cmd == 'compare':
- if len(args) != 2:
- usage()
- exit(1)
- cmd_compare(*args)
+ db = TimingsDb(options.db_path)
- elif cmd == 'combine':
- if len(args) < 3:
+ if cmd == 'run':
+ if len(args) < 1 or len(args) > 2:
usage()
- exit(1)
- cmd_combine(*args)
+ cmdline_run(db, options, *args)
- elif cmd == 'run':
- if len(args) < 3 or len(args) > 4:
+ elif cmd == 'compare':
+ if len(args) < 2:
usage()
- exit(1)
- cmd_run(*args)
+ cmdline_compare(db, options, *args)
+
+ elif cmd == 'list':
+ cmdline_list(db, options, *args)
elif cmd == 'show':
- if not args:
+ cmdline_show(db, options, *args)
+
+ elif cmd == 'chart':
+ if 'compare'.startswith(args[0]):
+ cmdline_chart_compare(db, options, *args[1:])
+ else:
usage()
- exit(1)
- cmd_show(*args)
else:
- usage()
+ usage('Unknown subcommand argument: %s' % cmd)
diff --git a/tools/dev/benchmarks/suite1/cronjob b/tools/dev/benchmarks/suite1/cronjob
index ca8b632..5b74292 100755
--- a/tools/dev/benchmarks/suite1/cronjob
+++ b/tools/dev/benchmarks/suite1/cronjob
@@ -1,16 +1,36 @@
#!/bin/bash
+#
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+#
# This is the cronjob as run on our ASF box aka svn-qavm.
# It uses neels' mad bash script magic called 'pat' to update and
# build the latest trunk, invokes a benchmark and sends as mail.
+#
+# A word on 'pat': this is a grown-out-of-proportions bash script that holds
+# all the small and large tasks that I do while developing on Subversion.
+# While it works for me, it's not particularly beautifully coded --
+# wouldn't publish it in Subversion's trunk, but if you want to find out
+# what it does: http://hofmeyr.de/pat/
-#EMAILS=your@email.addresses
-EMAILS=""
-
-if [ "$USER" = "neels" ]; then
- # I don't want to keep editing files after every update. ~Neels
- EMAILS=dev@subversion.apache.org
-fi
-
+#EMAILS=your@ema.il add@ress.es
+EMAILS=dev@subversion.apache.org
echo
echo "--------------------------------------------------------------------"
@@ -19,12 +39,16 @@ echo
results="$(tempfile)"
+benchdir=/home/neels/svnbench
+patbin=/home/neels/bin/pat
+patbase=/home/neels/pat
+
# first update trunk to HEAD and rebuild.
# update/build is logged to the cronjob log (via stdout)
-cd /home/neels/pat/trunk
-/home/neels/bin/pat update
+cd "$patbase/trunk"
+"$patbin" update
if [ "$?" -ne "0" ]; then
subject="Failed to update to HEAD."
@@ -32,14 +56,14 @@ if [ "$?" -ne "0" ]; then
echo "$subject"
else
- rev="$(svn info /home/neels/pat/trunk/src | grep Revision)"
+ rev="$("$patbase"/stable/prefix/bin/svn info "$patbase"/trunk/src | grep Revision)"
if [ -z "$rev" ]; then
subject="Working copy problem."
echo "$subject" > "$results"
echo "$subject"
else
- /home/neels/bin/pat remake
+ NONMAINTAINER=1 "$patbin" remake
if [ "$?" -ne "0" ]; then
subject="Failed to build $rev."
echo "$subject" > "$results"
@@ -50,10 +74,10 @@ else
# updating and building succeeded!
# run the benchmark:
- compiled="$(/home/neels/pat/trunk/prefix/bin/svn --version | grep "compiled")"
+ compiled="$("$patbase"/trunk/prefix/bin/svn --version | grep "compiled")"
subject="$rev$compiled"
- cd /home/neels/svnbench/
+ cd "$benchdir"
# make more or less sure that runs don't leak into each other via
# I/O caching.
@@ -62,8 +86,8 @@ else
# basically, just run it. But also, I want to
# - append output to stdout, for cronjob logging.
# - send output as mail, but only this run's output less update&build
- "$(which time)" -p ./run 2>&1 | tee "$results"
-
+ time -p ./run 2>&1 | tee "$results"
+ time -p ./generate_charts 2>&1 | tee -a "$results"
fi
fi
fi
diff --git a/tools/dev/benchmarks/suite1/generate_charts b/tools/dev/benchmarks/suite1/generate_charts
new file mode 100755
index 0000000..8e16526
--- /dev/null
+++ b/tools/dev/benchmarks/suite1/generate_charts
@@ -0,0 +1,60 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+SVN_A_NAME="1.7.0"
+SVN_B_NAME="trunk"
+
+# benchmark script and parameters...
+benchmark="$PWD/benchmark.py"
+
+db="$PWD/${SVNBENCH_DEBUG}benchmark.db"
+
+chartsdir="$PWD/charts"
+
+mkdir -p "$chartsdir/.busy"
+
+if [ ! -e "$chartsdir/README" ]; then
+ cat > "$chartsdir/README" <<END
+These charts are generated by svn benchmark suite1.
+http://svn.apache.org/repos/asf/subversion/trunk/tools/dev/benchmarks/suite1
+
+*DISCLAIMER* - This tests only file://-URL access on a GNU/Linux VM.
+This is intended to measure changes in performance of the local working
+copy layer, *only*. These results are *not* generally true for everyone.
+END
+fi
+
+for levelspread in "" 5x5 1x100 100x1; do
+ if [ -z "$levelspread" ]; then
+ lsarg=""
+ lstitle=""
+ else
+ lsarg=",$levelspread"
+ lstitle=", WC dir levels x spread = $levelspread"
+ fi
+ N=12
+ "$benchmark" -f "$db" chart compare \
+ ${SVN_A_NAME}$lsarg ${SVN_B_NAME}@last${N}$lsarg \
+ -o "$chartsdir/.busy/compare_${SVN_A_NAME}_${SVN_B_NAME}@last${N}$lsarg.svg" \
+ -t "svn client benchmarks, file:// access *only*$lstitle"
+done
+
+mv "$chartsdir/.busy/"*.svg "$chartsdir/"
+rmdir "$chartsdir/.busy"
+
diff --git a/tools/dev/benchmarks/suite1/run b/tools/dev/benchmarks/suite1/run
index ce02fb3..c146ea0 100755
--- a/tools/dev/benchmarks/suite1/run
+++ b/tools/dev/benchmarks/suite1/run
@@ -17,39 +17,53 @@
# specific language governing permissions and limitations
# under the License.
-# Where are the svn binaries you want to benchmark?
-if [ "$USER" = "neels" ]; then
- SVN_1_6="$HOME/pat/stable/prefix/bin/svn"
- SVN_trunk="$HOME/pat/trunk/prefix/bin/svn"
-else
- SVN_1_6="$HOME/src/svn-1.6.x/subversion/svn/svn"
- SVN_trunk="$HOME/src/svn/subversion/svn/svn"
+# debug? Just uncomment.
+#SVNBENCH_DEBUG=DEBUG_
+if [ -n "$SVNBENCH_DEBUG" ]; then
+ SVNBENCH_DEBUG="DEBUG_"
fi
-benchmark="$PWD/benchmark.py"
+# Subversion bin-dir used for maintenance of working copies
+SVN_STABLE="$HOME/pat/stable/prefix/bin/"
+
+# Where to find the svn binaries you want to benchmark, what are their labels
+# and Last Changed Revisions?
+# side A
+SVN_A_NAME="1.7.0"
+SVN_A="$HOME/pat/bench/prefix/bin"
+SVN_A_REV="$("$SVN_STABLE"/svnversion -c "$HOME/pat/bench/src" | sed 's/.*://')"
-parent="$(date +"%Y%m%d-%H%M%S")"
-inital_workdir="$PWD"
-mkdir "$parent"
-cd "$parent"
-pwd
+# side B
+SVN_B_NAME="trunk"
+SVN_B="$HOME/pat/trunk/prefix/bin"
+SVN_B_REV="$("$SVN_STABLE"/svnversion -c "$HOME/pat/trunk/src" | sed 's/.*://')"
+
+echo "$SVN_A_NAME@$SVN_A_REV vs. $SVN_B_NAME@$SVN_B_REV"
+
+# benchmark script and parameters...
+benchmark="$PWD/benchmark.py"
+db="$PWD/${SVNBENCH_DEBUG}benchmark.db"
batch(){
levels="$1"
spread="$2"
N="$3"
- pre="${levels}x${spread}_"
- echo
- echo "---------------------------------------------------------------------"
- echo
- echo "Results for dir levels: $levels spread: $spread"
- "$benchmark" "--svn=${SVN_1_6}" run ${pre}1.6 $levels $spread $N >/dev/null
- "$benchmark" "--svn=${SVN_trunk}" run ${pre}trunk $levels $spread $N > /dev/null
- "$benchmark" compare ${pre}1.6 ${pre}trunk
+
+ # SVN_A is a fixed tag, currently 1.7.0. For each call, run this once.
+ # It will be called again and again for each trunk build being tested,
+ # that's why we don't really need to run it $N times every time.
+ N_for_A=1
+ "$benchmark" "--db-path=$db" "--svn-bin-dir=$SVN_A" \
+ run "$SVN_A_NAME@$SVN_A_REV,${levels}x$spread" "$N_for_A" >/dev/null
+
+ # SVN_B is a branch, i.e. the moving target, benchmarked at a specific
+ # point in history each time this script is called. Run this $N times.
+ "$benchmark" "--db-path=$db" "--svn-bin-dir=$SVN_B" \
+ run "$SVN_B_NAME@$SVN_B_REV,${levels}x$spread" $N >/dev/null
}
-N=6
+N=3
al=5
as=5
bl=100
@@ -57,42 +71,75 @@ bs=1
cl=1
cs=100
-##DEBUG
-#N=1
-#al=1
-#as=1
-#bl=2
-#bs=1
-#cl=1
-#cs=2
-##DEBUG
+if [ -n "$SVNBENCH_DEBUG" ]; then
+ echo "DEBUG"
+ N=1
+ al=1
+ as=1
+ bl=2
+ bs=1
+ cl=1
+ cs=2
+fi
{
started="$(date)"
echo "Started at $started"
+
+echo "
+*DISCLAIMER* - This tests only file://-URL access on a GNU/Linux VM.
+This is intended to measure changes in performance of the local working
+copy layer, *only*. These results are *not* generally true for everyone.
+
+Charts of this data are available at http://svn-qavm.apache.org/charts/"
+
+if [ -z "$SVNBENCH_SUMMARY_ONLY" ]; then
+ batch $al $as $N
+ batch $bl $bs $N
+ batch $cl $cs $N
+else
+ echo "(not running benchmarks, just printing results on record.)"
+fi
+
+echo ""
+echo "Averaged-total results across all runs:"
+echo "---------------------------------------"
echo ""
+"$benchmark" "--db-path=$db" \
+ compare "$SVN_A_NAME" "$SVN_B_NAME@$SVN_B_REV"
-batch $al $as $N
-batch $bl $bs $N
-batch $cl $cs $N
+echo ""
+echo ""
+echo "Above totals split into separate <dir-levels>x<dir-spread> runs:"
+echo "----------------------------------------------------------------"
+echo ""
+
+for lvlspr in "${al}x${as}" "${bl}x${bs}" "${cl}x${cs}"; do
+ "$benchmark" "--db-path=$db" \
+ compare "$SVN_A_NAME,$lvlspr" "$SVN_B_NAME@$SVN_B_REV,$lvlspr"
+ echo ""
+done
echo ""
-echo =========================================================================
echo ""
-"$benchmark" combine total_1.6 *x*_1.6 >/dev/null
-"$benchmark" combine total_trunk *x*_trunk >/dev/null
+echo "More detail:"
+echo "------------"
+echo ""
-echo "comparing averaged totals..."
-"$benchmark" compare total_1.6 total_trunk
+for lvlspr in "${al}x${as}" "${bl}x${bs}" "${cl}x${cs}" "" ; do
+ "$benchmark" "--db-path=$db" show "$SVN_A_NAME,$lvlspr"
+ echo --
+ "$benchmark" "--db-path=$db" show "$SVN_B_NAME@$SVN_B_REV,$lvlspr"
+ echo --
+ "$benchmark" "--db-path=$db" \
+ compare -v "$SVN_A_NAME,$lvlspr" "$SVN_B_NAME@$SVN_B_REV,$lvlspr"
+ echo ""
+ echo ""
+done
echo ""
echo "Had started at $started,"
echo " done at $(date)"
-pwd
} 2>&1 | tee results.txt
-cd "$inital_workdir"
-if [ -f "$parent/total_trunk" ]; then
- rm -rf "$parent"
-fi
diff --git a/tools/dev/benchmarks/suite1/run.bat b/tools/dev/benchmarks/suite1/run.bat
index b2c71e7..6d3d466 100644
--- a/tools/dev/benchmarks/suite1/run.bat
+++ b/tools/dev/benchmarks/suite1/run.bat
@@ -1,101 +1,105 @@
-:: Licensed to the Apache Software Foundation (ASF) under one
-:: or more contributor license agreements. See the NOTICE file
-:: distributed with this work for additional information
-:: regarding copyright ownership. The ASF licenses this file
-:: to you under the Apache License, Version 2.0 (the
-:: "License"); you may not use this file except in compliance
-:: with the License. You may obtain a copy of the License at
-::
-:: http://www.apache.org/licenses/LICENSE-2.0
-::
-:: Unless required by applicable law or agreed to in writing,
-:: software distributed under the License is distributed on an
-:: "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-:: KIND, either express or implied. See the License for the
-:: specific language governing permissions and limitations
-:: under the License.
-
-@ECHO OFF
-SETLOCAL EnableDelayedExpansion
-
-:: Where are the svn binaries you want to benchmark?
-SET SVN_1_6=C:\path\to\1.6-svn\bin\svn
-SET SVN_trunk=C:\path\to\trunk-svn\bin\svn
-
-SET benchmark=%CD%\benchmark.py
-
-SET my_datetime=%date%-%time%
-SET my_datetime=%my_datetime: =_%
-SET my_datetime=%my_datetime:/=_%
-SET my_datetime=%my_datetime::=%
-SET my_datetime=%my_datetime:.=%
-SET my_datetime=%my_datetime:,=%
-SET parent=%my_datetime%
-SET inital_workdir=%CD%
-mkdir "%parent%"
-cd "%parent%"
-ECHO %CD%
-
-GOTO main
-
-:batch
- SET levels=%1
- SET spread=%2
- SET N=%3
- SET pre=%levels%x%spread%_
- ECHO.
- ECHO.---------------------------------------------------------------------
- ECHO.
- ECHO.Results for dir levels: %levels% spread: %spread%
- CALL "%benchmark%" --svn="%SVN_1_6%" run %pre%1.6 %levels% %spread% %N% > NUL
- CALL "%benchmark%" --svn="%SVN_trunk%" run %pre%trunk %levels% %spread% %N% > NUL
- CALL "%benchmark%" compare %pre%1.6 %pre%trunk
- GOTO :EOF
-
-:main
-SET N=6
-SET al=5
-SET as=5
-SET bl=25
-SET bs=1
-SET cl=1
-SET cs=100
-
-::::DEBUG
-::SET N=1
-::SET al=1
-::SET as=1
-::SET bl=2
-::SET bs=1
-::SET cl=1
-::SET cs=2
-::::DEBUG
-
-SET started=%date%-%time%
-ECHO.Started at %started%
-ECHO.
-
-CALL :batch %al% %as% %N%
-CALL :batch %bl% %bs% %N%
-CALL :batch %cl% %cs% %N%
-
-ECHO.
-ECHO.=========================================================================
-ECHO.
-FOR %%F IN (*x*_1.6) DO SET all_1.6=!all_1.6! %%F
-CALL "%benchmark%" combine total_1.6 %all_1.6% > NUL
-FOR %%F IN (*x*_trunk) DO SET all_trunk=!all_trunk! %%F
-CALL "%benchmark%" combine total_trunk %all_trunk% > NUL
-
-ECHO.comparing averaged totals..."
-CALL "%benchmark%" compare total_1.6 total_trunk
-
-ECHO.
-ECHO.Had started at %started%,
-ECHO. done at %date%-%time%
-ECHO %CD%
-
-cd "%inital_workdir%"
-IF EXIST %parent%\total_trunk rmdir /S /Q "%parent%"
-
-ENDLOCAL
+:: Licensed to the Apache Software Foundation (ASF) under one
+:: or more contributor license agreements. See the NOTICE file
+:: distributed with this work for additional information
+:: regarding copyright ownership. The ASF licenses this file
+:: to you under the Apache License, Version 2.0 (the
+:: "License"); you may not use this file except in compliance
+:: with the License. You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing,
+:: software distributed under the License is distributed on an
+:: "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+:: KIND, either express or implied. See the License for the
+:: specific language governing permissions and limitations
+:: under the License.
+
+@ECHO OFF
+
+ECHO.THIS SCRIPT IS CURRENTLY OUTDATED.
+GOTO :EOF
+
+SETLOCAL EnableDelayedExpansion
+
+:: Where are the svn binaries you want to benchmark?
+SET SVN_1_6=C:\path\to\1.6-svn\bin\svn
+SET SVN_trunk=C:\path\to\trunk-svn\bin\svn
+
+SET benchmark=%CD%\benchmark.py
+
+SET my_datetime=%date%-%time%
+SET my_datetime=%my_datetime: =_%
+SET my_datetime=%my_datetime:/=_%
+SET my_datetime=%my_datetime::=%
+SET my_datetime=%my_datetime:.=%
+SET my_datetime=%my_datetime:,=%
+SET parent=%my_datetime%
+SET inital_workdir=%CD%
+mkdir "%parent%"
+cd "%parent%"
+ECHO %CD%
+
+GOTO main
+
+:batch
+ SET levels=%1
+ SET spread=%2
+ SET N=%3
+ SET pre=%levels%x%spread%_
+ ECHO.
+ ECHO.---------------------------------------------------------------------
+ ECHO.
+ ECHO.Results for dir levels: %levels% spread: %spread%
+ CALL "%benchmark%" --svn="%SVN_1_6%" run %pre%1.6 %levels% %spread% %N% > NUL
+ CALL "%benchmark%" --svn="%SVN_trunk%" run %pre%trunk %levels% %spread% %N% > NUL
+ CALL "%benchmark%" compare %pre%1.6 %pre%trunk
+ GOTO :EOF
+
+:main
+SET N=6
+SET al=5
+SET as=5
+SET bl=25
+SET bs=1
+SET cl=1
+SET cs=100
+
+::::DEBUG
+::SET N=1
+::SET al=1
+::SET as=1
+::SET bl=2
+::SET bs=1
+::SET cl=1
+::SET cs=2
+::::DEBUG
+
+SET started=%date%-%time%
+ECHO.Started at %started%
+ECHO.
+
+CALL :batch %al% %as% %N%
+CALL :batch %bl% %bs% %N%
+CALL :batch %cl% %cs% %N%
+
+ECHO.
+ECHO.=========================================================================
+ECHO.
+FOR %%F IN (*x*_1.6) DO SET all_1.6=!all_1.6! %%F
+CALL "%benchmark%" combine total_1.6 %all_1.6% > NUL
+FOR %%F IN (*x*_trunk) DO SET all_trunk=!all_trunk! %%F
+CALL "%benchmark%" combine total_trunk %all_trunk% > NUL
+
+ECHO.comparing averaged totals..."
+CALL "%benchmark%" compare total_1.6 total_trunk
+
+ECHO.
+ECHO.Had started at %started%,
+ECHO. done at %date%-%time%
+ECHO %CD%
+
+cd "%inital_workdir%"
+IF EXIST %parent%\total_trunk rmdir /S /Q "%parent%"
+
+ENDLOCAL
diff --git a/tools/dev/contribulyze.py b/tools/dev/contribulyze.py
index 58123b0..1730c57 100755
--- a/tools/dev/contribulyze.py
+++ b/tools/dev/contribulyze.py
@@ -511,8 +511,13 @@ class LogMessage(object):
log_separator = '-' * 72 + '\n'
log_header_re = re.compile\
('^(r[0-9]+) \| ([^|]+) \| ([^|]+) \| ([0-9]+)[^0-9]')
-field_re = re.compile('^(Patch|Review(ed)?|Suggested|Found|Inspired) by:\s*\S.*$')
-field_aliases = { 'Reviewed' : 'Review' }
+field_re = re.compile(
+ '^(Patch|Review(ed)?|Suggested|Found|Inspired|Tested|Reported) by:'
+ '\s*\S.*$')
+field_aliases = {
+ 'Reviewed' : 'Review',
+ 'Reported' : 'Found',
+}
parenthetical_aside_re = re.compile('^\s*\(.*\)\s*$')
def graze(input):
diff --git a/tools/dev/fsfs-access-map.c b/tools/dev/fsfs-access-map.c
new file mode 100644
index 0000000..5fbd221
--- /dev/null
+++ b/tools/dev/fsfs-access-map.c
@@ -0,0 +1,678 @@
+/* fsfs-access-map.c -- convert strace output into FSFS access bitmap
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include "svn_pools.h"
+#include "svn_string.h"
+#include "svn_io.h"
+
+#include "private/svn_string_private.h"
+
+/* The information we gather for each file. There will be one instance
+ * per file name - even if the file got deleted and re-created.
+ */
+typedef struct file_stats_t
+{
+  /* file name as found in the open() call */
+  const char *name;
+
+  /* file size as determined during this tool run. Will be 0 for
+   * files that no longer exist.  However, there may still be entries
+   * in the read_map. */
+  apr_int64_t size;
+
+  /* for rev files (packed or non-packed), this will be the first revision
+   * in that file. -1 for non-rev files. */
+  apr_int64_t rev_num;
+
+  /* number of times this file got opened */
+  apr_int64_t open_count;
+
+  /* number of lseek counts */
+  apr_int64_t seek_count;
+
+  /* number of lseek calls to clusters not previously read */
+  apr_int64_t uncached_seek_count;
+
+  /* number of read() calls */
+  apr_int64_t read_count;
+
+  /* total number of bytes returned by those reads */
+  apr_int64_t read_size;
+
+  /* number of clusters read (including repeated reads of a cluster) */
+  apr_int64_t clusters_read;
+
+  /* number of different clusters read
+   * (i.e. number of non-zero entries in read_map). */
+  apr_int64_t unique_clusters_read;
+
+  /* cluster -> read count mapping (1 word per cluster, saturated at 64k) */
+  apr_array_header_t *read_map;
+
+} file_stats_t;
+
+/* Represents an open file handle.  It refers to a file and concatenates
+ * consecutive reads such that we don't artificially hit the same cluster
+ * multiple times.  Instances of this type will be reused to limit the
+ * allocation load on the lookup map.
+ */
+typedef struct handle_info_t
+{
+  /* the open file */
+  file_stats_t *file;
+
+  /* byte offset at which the current series of reads started (default: 0) */
+  apr_int64_t last_read_start;
+
+  /* bytes read so far in the current series of reads started (default: 0) */
+  apr_int64_t last_read_size;
+
+} handle_info_t;
+
+/* useful typedef */
+typedef unsigned char byte;
+typedef unsigned short word;
+
+/* global const char * file name -> *file_info_t map */
+static apr_hash_t *files = NULL;
+
+/* global int handle -> *handle_info_t map.  Entries don't get removed
+ * by close().  Instead, we simply recycle (and re-initialize) existing
+ * instances. */
+static apr_hash_t *handles = NULL;
+
+/* assume cluster size. 64 and 128kB are typical values for RAIDs. */
+static apr_int64_t cluster_size = 64 * 1024;
+
+/* Call this after a sequence of reads has been ended by either close()
+ * or lseek() for this HANDLE_INFO.  This will update the read_map and
+ * unique_clusters_read members of the underlying file_info_t structure.
+ */
+static void
+store_read_info(handle_info_t *handle_info)
+{
+  /* nothing to record for an empty read sequence */
+  if (handle_info->last_read_size)
+    {
+      apr_size_t i;
+      apr_size_t first_cluster
+         = (apr_size_t)(handle_info->last_read_start / cluster_size);
+      apr_size_t last_cluster
+         = (apr_size_t)((  handle_info->last_read_start
+                         + handle_info->last_read_size
+                         - 1) / cluster_size);
+
+      /* auto-expand access map in case the file later shrunk or got deleted */
+      while (handle_info->file->read_map->nelts <= last_cluster)
+        APR_ARRAY_PUSH(handle_info->file->read_map, word) = 0;
+
+      /* accumulate the accesses per cluster.  Saturate the per-cluster
+       * counters at 0xffff and count first-time (i.e. unique) cluster
+       * accesses. */
+      handle_info->file->clusters_read += last_cluster - first_cluster + 1;
+      for (i = first_cluster; i <= last_cluster; ++i)
+        {
+          word *count = &APR_ARRAY_IDX(handle_info->file->read_map, i, word);
+          if (*count == 0)
+            handle_info->file->unique_clusters_read++;
+          if (*count < 0xffff)
+            ++*count;
+        }
+    }
+}
+
+/* Handle an open() call.  Ensure that a file_stats_t for the given NAME
+ * exists.  Auto-create and initialize a handle_info_t for it linked to
+ * HANDLE.
+ */
+static void
+open_file(const char *name, int handle)
+{
+  file_stats_t *file = apr_hash_get(files, name, APR_HASH_KEY_STRING);
+  handle_info_t *handle_info = apr_hash_get(handles, &handle, sizeof(handle));
+
+  /* auto-create file info */
+  if (!file)
+    {
+      apr_pool_t *pool = apr_hash_pool_get(files);
+      apr_pool_t *sub_pool = svn_pool_create(pool);
+
+      apr_file_t *apr_file = NULL;
+      apr_finfo_t finfo = { 0 };
+      apr_size_t cluster_count = 0;
+
+      /* determine file size (if file still exists; otherwise FINFO stays
+       * zero-initialized and SIZE becomes 0) */
+      apr_file_open(&apr_file, name,
+                    APR_READ | APR_BUFFERED, APR_OS_DEFAULT, sub_pool);
+      if (apr_file)
+        apr_file_info_get(&finfo, APR_FINFO_SIZE, apr_file);
+      svn_pool_destroy(sub_pool);
+
+      file = apr_pcalloc(pool, sizeof(*file));
+      file->name = apr_pstrdup(pool, name);
+      file->size = finfo.size;
+
+      /* pre-allocate cluster map accordingly
+       * (will be auto-expanded later if necessary) */
+      cluster_count = (apr_size_t)(1 + (file->size - 1) / cluster_size);
+      file->read_map = apr_array_make(pool, file->size
+                                          ? cluster_count
+                                          : 1, sizeof(word));
+
+      /* BUGFIX: push elements of the array's actual element type.  The
+       * array was created with sizeof(word) elements; pushing 'byte'
+       * advanced the array by one element but initialized only its first
+       * byte, leaving the rest of each counter indeterminate. */
+      while (file->read_map->nelts < cluster_count)
+        APR_ARRAY_PUSH(file->read_map, word) = 0;
+
+      /* determine first revision of rev / packed rev files
+       * (manifest files are skipped; pack dirs are named "<rev>.pack") */
+      if (strstr(name, "/db/revs/") != NULL && strstr(name, "manifest") == NULL)
+        if (strstr(name, ".pack/pack") != NULL)
+          file->rev_num = atoi(strstr(name, "/db/revs/") + 9);
+        else
+          file->rev_num = atoi(strrchr(name, '/') + 1);
+      else
+        file->rev_num = -1;
+
+      apr_hash_set(files, file->name, APR_HASH_KEY_STRING, file);
+    }
+
+  file->open_count++;
+
+  /* auto-create handle instance (never freed; recycled on re-open) */
+  if (!handle_info)
+    {
+      apr_pool_t *pool = apr_hash_pool_get(handles);
+      int *key = apr_palloc(pool, sizeof(*key));
+      *key = handle;
+
+      handle_info = apr_pcalloc(pool, sizeof(*handle_info));
+      apr_hash_set(handles, key, sizeof(handle), handle_info);
+    }
+
+  /* link handle to file and reset the read sequence */
+  handle_info->file = file;
+  handle_info->last_read_start = 0;
+  handle_info->last_read_size = 0;
+}
+
+/* COUNT bytes have been read from file with the given HANDLE.
+ */
+static void
+read_file(int handle, apr_int64_t count)
+{
+  handle_info_t *info = apr_hash_get(handles, &handle, sizeof(handle));
+
+  /* reads on handles we never saw an open() for are simply ignored */
+  if (info == NULL)
+    return;
+
+  /* known file handle -> extend the current read sequence and update
+   * the per-file read statistics */
+  info->last_read_size += count;
+  info->file->read_count++;
+  info->file->read_size += count;
+}
+
+/* Seek to offset LOCATION in file given by HANDLE.
+ */
+static void
+seek_file(int handle, apr_int64_t location)
+{
+  handle_info_t *handle_info = apr_hash_get(handles, &handle, sizeof(handle));
+  if (handle_info)
+    {
+      /* known file handle -> end current read sequence and start a new one */
+
+      apr_size_t cluster = (apr_size_t)(location / cluster_size);
+
+      /* flush the accumulated read sequence into the cluster map */
+      store_read_info(handle_info);
+
+      handle_info->last_read_size = 0;
+      handle_info->last_read_start = location;
+      handle_info->file->seek_count++;
+
+      /* if we seek to a location that had not been read from before,
+       * there will probably be a real I/O seek on the following read.
+       */
+      if (   handle_info->file->read_map->nelts <= cluster
+          || APR_ARRAY_IDX(handle_info->file->read_map, cluster, word) == 0)
+        handle_info->file->uncached_seek_count++;
+    }
+}
+
+/* The given file HANDLE has been closed.
+ */
+static void
+close_file(int handle)
+{
+  /* for known file handles, terminate the current read sequence; the
+   * handle entry itself stays in the map for later recycling */
+  handle_info_t *info = apr_hash_get(handles, &handle, sizeof(handle));
+  if (info == NULL)
+    return;
+
+  store_read_info(info);
+}
+
+/* Parse / process one non-empty LINE from an strace output.
+ */
+static void
+parse_line(svn_stringbuf_t *line)
+{
+  /* determine function name, first parameter and return value */
+  char *func_end = strchr(line->data, '(');
+  char *return_value = strrchr(line->data, ' ');
+  char *first_param_end;
+  apr_int64_t func_return = 0;
+
+  /* not a "func(args) = result" line -> ignore */
+  if (func_end == NULL || return_value == NULL)
+    return;
+
+  first_param_end = strchr(func_end, ',');
+  if (first_param_end == NULL)
+    first_param_end = strchr(func_end, ')');
+
+  if (first_param_end == NULL)
+    return;
+
+  /* split the line in place: function name / first parameter / result */
+  *func_end++ = 0;
+  *first_param_end = 0;
+  ++return_value;
+
+  /* (try to) convert the return value into an integer.
+   * If that fails, continue anyway as defaulting to 0 will be safe for us. */
+  svn_error_clear(svn_cstring_atoi64(&func_return, return_value));
+
+  /* process those operations that we care about */
+  if (strcmp(line->data, "open") == 0)
+    {
+      /* remove double quotes from file name parameter */
+      *func_end++ = 0;
+      *--first_param_end = 0;
+
+      /* open()'s return value is the new file descriptor */
+      open_file(func_end, (int)func_return);
+    }
+  else if (strcmp(line->data, "read") == 0)
+    read_file(atoi(func_end), func_return);
+  else if (strcmp(line->data, "lseek") == 0)
+    /* NOTE(review): assumes lseek()'s return value (the resulting absolute
+     * offset) is the seek target - true for all whence modes on success */
+    seek_file(atoi(func_end), func_return);
+  else if (strcmp(line->data, "close") == 0)
+    close_file(atoi(func_end));
+}
+
+/* Process the strace output stored in FILE, line by line.
+ */
+static void
+parse_file(apr_file_t *file)
+{
+  apr_pool_t *pool = svn_pool_create(NULL);
+  apr_pool_t *iter_pool = svn_pool_create(pool);
+
+  /* limit lines to 4k (usually, we need less than 200 bytes) */
+  svn_stringbuf_t *line = svn_stringbuf_create_ensure(4096, pool);
+
+  do
+    {
+      svn_error_t *err;
+
+      line->len = line->blocksize-1;
+      err = svn_io_read_length_line(file, line->data, &line->len, iter_pool);
+      if (err)
+        {
+          /* EOF or read error: either way, stop parsing.  BUGFIX: test
+           * the error BEFORE clearing it - the original code called
+           * svn_error_clear() first and then inspected the freed error. */
+          svn_error_clear(err);
+          break;
+        }
+
+      parse_line(line);
+      svn_pool_clear(iter_pool);
+    }
+  while (line->len > 0);
+
+  /* release the line buffer and both pools (fixes a pool leak) */
+  svn_pool_destroy(pool);
+}
+
+/* qsort() callback.  Sort files by ascending revision number.
+ */
+static int
+compare_files(file_stats_t **lhs, file_stats_t **rhs)
+{
+  /* BUGFIX: a qsort() comparator must return negative / zero / positive.
+   * The original "return a < b;" only ever yielded 0 or 1, which is not
+   * a consistent ordering and makes qsort()'s result undefined. */
+  if ((*lhs)->rev_num < (*rhs)->rev_num)
+    return -1;
+
+  return (*lhs)->rev_num > (*rhs)->rev_num ? 1 : 0;
+}
+
+/* Return all rev (and packed rev) files sorted by revision number.
+ * Allocate the result in POOL.
+ */
+static apr_array_header_t *
+get_rev_files(apr_pool_t *pool)
+{
+  apr_hash_index_t *hi;
+  apr_array_header_t *result = apr_array_make(pool,
+                                              apr_hash_count(files),
+                                              sizeof(file_stats_t *));
+
+  /* select all files that have a rev number */
+  for (hi = apr_hash_first(pool, files); hi; hi = apr_hash_next(hi))
+    {
+      const char *name = NULL;
+      apr_ssize_t len = 0;
+      file_stats_t *file = NULL;
+
+      /* NAME and LEN are required by the API but not used here */
+      apr_hash_this(hi, (const void **)&name, &len, (void**)&file);
+      if (file->rev_num >= 0)
+        APR_ARRAY_PUSH(result, file_stats_t *) = file;
+    }
+
+  /* sort them (the cast adapts the typed comparator to qsort's signature) */
+  qsort(result->elts, result->nelts, result->elt_size,
+        (int (*)(const void *, const void *))compare_files);
+
+  /* return the result */
+  return result;
+}
+
+/* store VALUE to DEST in little-endian format.  Assume that the target
+ * buffer is filled with 0.  Only meaningful for non-negative VALUEs:
+ * the loop emits one byte per base-256 digit and writes nothing for 0
+ * (the pre-zeroed buffer already encodes that).
+ */
+static void
+write_number(byte *dest, int value)
+{
+  while (value)
+    {
+      *dest = (byte)(value % 256);
+      value /= 256;
+      ++dest;
+    }
+}
+
+/* Return a linearly interpolated y value for X with X0 <= X <= X1 and
+ * the corresponding Y0 and Y1 values.  Integer division truncates the
+ * result toward zero; X0 must differ from X1.
+ */
+static int
+interpolate(int y0, int x0, int y1, int x1, int x)
+{
+  return y0 + ((y1 - y0) * (x - x0)) / (x1 - x0);
+}
+
+/* Return the BMP-encoded 24 bit COLOR for the given value.
+ * COLOR is written in BMP byte order, i.e. B, G, R.
+ */
+static void
+select_color(byte color[3], word value)
+{
+  enum { COLOR_COUNT = 10 };
+
+  /* value -> color table. Missing values get interpolated.
+   * { count, B - G - R } */
+  word table[COLOR_COUNT][4] =
+    {
+      {     0, 255, 255, 255 },   /* unread -> white */
+      {     1,  64, 128,   0 },   /* read once -> turquoise */
+      {     2,   0, 128,   0 },   /* twice  -> green */
+      {     8,   0, 192, 192 },   /* 8x     -> yellow */
+      {    64,   0,   0, 192 },   /* 64x    -> red */
+      {   256,  64,  32, 230 },   /* 256x   -> bright red */
+      {   512, 192,   0, 128 },   /* 512x   -> purple */
+      {  1024,  96,  32,  96 },   /* 1024x  -> UV purple */
+      {  4096,  32,  16,  32 },   /* 4096x  -> EUV purple */
+      { 65535,   0,   0,   0 }    /* max    -> black */
+    };
+
+  /* find upper limit entry for value.
+   * (table[0] covers value == 0, so i >= 1 whenever we interpolate) */
+  int i;
+  for (i = 0; i < COLOR_COUNT; ++i)
+    if (table[i][0] >= value)
+      break;
+
+  /* exact match? */
+  if (table[i][0] == value)
+    {
+      color[0] = (byte)table[i][1];
+      color[1] = (byte)table[i][2];
+      color[2] = (byte)table[i][3];
+    }
+  else
+    {
+      /* interpolate each channel between the two neighboring entries */
+      color[0] = (byte)interpolate(table[i-1][1], table[i-1][0],
+                                   table[i][1], table[i][0],
+                                   value);
+      color[1] = (byte)interpolate(table[i-1][2], table[i-1][0],
+                                   table[i][2], table[i][0],
+                                   value);
+      color[2] = (byte)interpolate(table[i-1][3], table[i-1][0],
+                                   table[i][3], table[i][0],
+                                   value);
+    }
+}
+
+/* Writes a BMP image header to FILE for a 24-bit color picture of the
+ * given XSIZE and YSIZE dimension.
+ */
+static void
+write_bitmap_header(apr_file_t *file, int xsize, int ysize)
+{
+  /* BMP file header (some values need to be filled in later)*/
+  byte header[54] =
+    {
+      'B', 'M',        /* magic */
+      0, 0, 0, 0,      /* file size (to be written later) */
+      0, 0, 0, 0,      /* reserved, unused */
+      54, 0, 0, 0,     /* pixel map starts at offset 54dec */
+
+      40, 0, 0, 0,     /* DIB header has 40 bytes */
+      0, 0, 0, 0,      /* x size in pixel */
+      0, 0, 0, 0,      /* y size in pixel */
+      1, 0,            /* 1 color plane */
+      24, 0,           /* 24 bits / pixel */
+      0, 0, 0, 0,      /* no pixel compression used */
+      0, 0, 0, 0,      /* size of pixel array (to be written later) */
+      0xe8, 3, 0, 0,   /* 1 pixel / mm */
+      0xe8, 3, 0, 0,   /* 1 pixel / mm */
+      0, 0, 0, 0,      /* no colors in palette */
+      0, 0, 0, 0       /* no colors to import */
+    };
+
+  apr_size_t written;
+
+  /* rows in BMP files must be aligned to 4 bytes */
+  int row_size = APR_ALIGN(xsize * 3, 4);
+
+  /* write numbers to header (all fields are little-endian) */
+  write_number(header + 2, ysize * row_size + 54);
+  write_number(header + 18, xsize);
+  write_number(header + 22, ysize);
+  write_number(header + 38, ysize * row_size);
+
+  /* write header to file */
+  written = sizeof(header);
+  apr_file_write(file, header, &written);
+}
+
+/* write the cluster read map for all files in INFO as BMP image to FILE.
+ * One row per file, one pixel per cluster; clusters past a file's map
+ * are drawn gray.
+ */
+static void
+write_bitmap(apr_array_header_t *info, apr_file_t *file)
+{
+  int ysize = info->nelts;
+  int xsize = 0;
+  int x, y;
+  int row_size;
+  int padding;
+  apr_size_t written;
+
+  /* xsize = max cluster number */
+  for (y = 0; y < ysize; ++y)
+    if (xsize < APR_ARRAY_IDX(info, y, file_stats_t *)->read_map->nelts)
+      xsize = APR_ARRAY_IDX(info, y, file_stats_t *)->read_map->nelts;
+
+  /* limit picture dimensions (16k pixels in each direction) */
+  if (xsize >= 0x4000)
+    xsize = 0x3fff;
+  if (ysize >= 0x4000)
+    ysize = 0x3fff;
+
+  /* rows in BMP files must be aligned to 4 bytes */
+  row_size = APR_ALIGN(xsize * 3, 4);
+  padding = row_size - xsize * 3;
+
+  /* write header to file */
+  write_bitmap_header(file, xsize, ysize);
+
+  /* write all rows */
+  for (y = 0; y < ysize; ++y)
+    {
+      file_stats_t *file_info = APR_ARRAY_IDX(info, y, file_stats_t *);
+      for (x = 0; x < xsize; ++x)
+        {
+          /* default: gray for clusters beyond this file's map */
+          byte color[3] = { 128, 128, 128 };
+          if (x < file_info->read_map->nelts)
+            {
+              word count = APR_ARRAY_IDX(file_info->read_map, x, word);
+              select_color(color, count);
+            }
+
+          written = sizeof(color);
+          apr_file_write(file, color, &written);
+        }
+
+      /* pad the row to the required 4-byte alignment */
+      if (padding)
+        {
+          char pad[3] = { 0 };
+          written = padding;
+          apr_file_write(file, pad, &written);
+        }
+    }
+}
+
+/* write a color bar with (roughly) logarithmic scale as BMP image to FILE.
+ * The image is a single 64x1 pixel row.
+ */
+static void
+write_scale(apr_file_t *file)
+{
+  int x;
+  word value = 0, inc = 1;
+
+  /* write header to file */
+  write_bitmap_header(file, 64, 1);
+
+  for (x = 0; x < 64; ++x)
+    {
+      apr_size_t written;
+      byte color[3] = { 128, 128, 128 };
+
+      select_color(color, value);
+      /* advance VALUE with a step size that doubles every 8 steps,
+       * giving the roughly logarithmic scale; stop at the word maximum */
+      if (value + (int)inc < 0x10000)
+        {
+          value += inc;
+          if (value >= 8 * inc)
+            inc *= 2;
+        }
+
+      written = sizeof(color);
+      apr_file_write(file, color, &written);
+    }
+}
+
+/* Write a summary of the I/O ops to stdout.
+ * Use POOL for temporaries.
+ */
+static void
+print_stats(apr_pool_t *pool)
+{
+  /* totals accumulated over all files */
+  apr_int64_t open_count = 0;
+  apr_int64_t seek_count = 0;
+  apr_int64_t read_count = 0;
+  apr_int64_t read_size = 0;
+  apr_int64_t clusters_read = 0;
+  apr_int64_t unique_clusters_read = 0;
+  apr_int64_t uncached_seek_count = 0;
+
+  apr_hash_index_t *hi;
+  for (hi = apr_hash_first(pool, files); hi; hi = apr_hash_next(hi))
+    {
+      const char *name = NULL;
+      apr_ssize_t len = 0;
+      file_stats_t *file = NULL;
+
+      /* NAME and LEN are required by the API but not used here */
+      apr_hash_this(hi, (const void **)&name, &len, (void**)&file);
+
+      open_count += file->open_count;
+      seek_count += file->seek_count;
+      read_count += file->read_count;
+      read_size += file->read_size;
+      clusters_read += file->clusters_read;
+      unique_clusters_read += file->unique_clusters_read;
+      uncached_seek_count += file->uncached_seek_count;
+    }
+
+  /* print right-aligned, thousands-separated numbers */
+  printf("%20s files\n", svn__i64toa_sep(apr_hash_count(files), ',', pool));
+  printf("%20s files opened\n", svn__i64toa_sep(open_count, ',', pool));
+  printf("%20s seeks\n", svn__i64toa_sep(seek_count, ',', pool));
+  printf("%20s uncached seeks\n", svn__i64toa_sep(uncached_seek_count, ',', pool));
+  printf("%20s reads\n", svn__i64toa_sep(read_count, ',', pool));
+  printf("%20s unique clusters read\n", svn__i64toa_sep(unique_clusters_read, ',', pool));
+  printf("%20s clusters read\n", svn__i64toa_sep(clusters_read, ',', pool));
+  printf("%20s bytes read\n", svn__i64toa_sep(read_size, ',', pool));
+}
+
+/* Some help output.  (Fixed several typos in the user-visible text:
+ * "greed" -> "green", "read-ish" -> "red-ish", "shares of" -> "shades of",
+ * missing "in" before "the current folder".) */
+static void
+print_usage(void)
+{
+  printf("fsfs-access-map <file>\n\n");
+  printf("Reads strace of some FSFS-based tool from <file>, prints some stats\n");
+  printf("and writes a cluster access map to 'access.bmp' in the current folder.\n");
+  printf("Each pixel corresponds to one 64kB cluster and every line to a rev\n");
+  printf("or packed rev file in the repository. Turquoise and green indicate\n");
+  printf("1 and 2 hits, yellow to red-ish colors for up to 20, shades of\n");
+  printf("purple for up to 100 and black for > 200 hits.\n\n");
+  printf("A typical strace invocation looks like this:\n");
+  printf("strace -e trace=open,close,read,lseek -o strace.txt svn log ...\n");
+}
+
+/* linear control flow:
+ * parse the strace log named on the command line, print summary stats,
+ * then write 'access.bmp' (cluster map) and 'scale.bmp' (color legend)
+ * to the current directory. */
+int main(int argc, const char *argv[])
+{
+  apr_pool_t *pool = NULL;
+  apr_file_t *file = NULL;
+
+  apr_initialize();
+  atexit(apr_terminate);
+
+  pool = svn_pool_create(NULL);
+  files = apr_hash_make(pool);
+  handles = apr_hash_make(pool);
+
+  /* missing argument or unreadable file both leave FILE == NULL */
+  if (argc == 2)
+    apr_file_open(&file, argv[1], APR_READ | APR_BUFFERED, APR_OS_DEFAULT,
+                  pool);
+  if (file == NULL)
+    {
+      print_usage();
+      return 0;
+    }
+  parse_file(file);
+  apr_file_close(file);
+
+  print_stats(pool);
+
+  apr_file_open(&file, "access.bmp",
+                APR_WRITE | APR_CREATE | APR_TRUNCATE | APR_BUFFERED,
+                APR_OS_DEFAULT, pool);
+  write_bitmap(get_rev_files(pool), file);
+  apr_file_close(file);
+
+  apr_file_open(&file, "scale.bmp",
+                APR_WRITE | APR_CREATE | APR_TRUNCATE | APR_BUFFERED,
+                APR_OS_DEFAULT, pool);
+  write_scale(file);
+  apr_file_close(file);
+
+  return 0;
+} \ No newline at end of file
diff --git a/tools/dev/fsfs-reorg.c b/tools/dev/fsfs-reorg.c
new file mode 100644
index 0000000..052ad39
--- /dev/null
+++ b/tools/dev/fsfs-reorg.c
@@ -0,0 +1,3147 @@
+/* fsfs-reorg.c -- prototypic tool to reorganize packed FSFS repositories
+ * to reduce seeks
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+
+#include <assert.h>
+
+#include <apr.h>
+#include <apr_general.h>
+#include <apr_file_io.h>
+#include <apr_poll.h>
+
+#include "svn_pools.h"
+#include "svn_diff.h"
+#include "svn_io.h"
+#include "svn_utf.h"
+#include "svn_dirent_uri.h"
+#include "svn_sorts.h"
+#include "svn_delta.h"
+#include "svn_hash.h"
+
+#include "private/svn_string_private.h"
+#include "private/svn_subr_private.h"
+#include "private/svn_dep_compat.h"
+
+/* gettext-style marker; a no-op here */
+#ifndef _
+#define _(x) x
+#endif
+
+/* prefix for error messages emitted by this tool
+ * (BUGFIX: tool name was misspelled "fsfs-reporg") */
+#define ERROR_TAG "fsfs-reorg: "
+
+/* forward declarations */
+typedef struct noderev_t noderev_t;
+typedef struct revision_info_t revision_info_t;
+
+/* A FSFS rev file is sequence of fragments and unused space (the latter
+ * only being inserted by this tool and not during ordinary SVN operation).
+ *
+ * This type defines the type of any fragment.
+ *
+ * Please note that the classification as "property", "dir" or "file"
+ * fragments is only to be used while determining the future placement
+ * of a representation. If the rep is shared, the same rep may be used
+ * as *any* of the 3 kinds.
+ */
+enum fragment_kind_t
+{
+ /* the 2 number line containing changes and root node offsets */
+ header_fragment,
+
+ /* list of all changes in a revision */
+ changes_fragment,
+
+ /* (the textual representation of) a noderev */
+ noderev_fragment,
+
+ /* a property rep (including PLAIN / DELTA header) */
+ property_fragment,
+
+ /* a directory rep (including PLAIN / DELTA header) */
+ dir_fragment,
+
+ /* a file rep (including PLAIN / DELTA header) */
+ file_fragment
+};
+
+/* A fragment. This is used to represent the final ordering, i.e. there
+ * will be an array containing elements of this type that basically put
+ * a fragment at some location in the target file.
+ */
+typedef struct fragment_t
+{
+ /* position in the target file */
+ apr_size_t position;
+
+ /* kind of fragment */
+ enum fragment_kind_t kind;
+
+ /* pointer to the fragment struct; type depends on KIND */
+ void *data;
+} fragment_t;
+
+/* Location info for a single revision.
+ */
+typedef struct revision_location_t
+{
+ /* pack file offset (manifest value), 0 for non-packed files */
+ apr_size_t offset;
+
+ /* offset of the changes list relative to OFFSET */
+ apr_size_t changes;
+
+ /* length of the changes list on bytes */
+ apr_size_t changes_len;
+
+ /* first offset behind the revision data in the pack file (file length
+ * for non-packed revs) */
+ apr_size_t end;
+} revision_location_t;
+
+/* Absolute position and size of some item.
+ */
+typedef struct location_t
+{
+ /* absolute offset in the file */
+ apr_size_t offset;
+
+ /* item length in bytes */
+ apr_size_t size;
+} location_t;
+
+/* A parsed directory entry. Note that instances of this struct may be
+ * shared between different DIRECTORY_T containers.
+ */
+typedef struct direntry_t
+{
+ /* (local) entry / path name */
+ const char *name;
+
+ /* strlen (name) */
+ apr_size_t name_len;
+
+ /* node rev providing ID and representation(s) */
+ noderev_t *node;
+} direntry_t;
+
+/* Representation of a parsed directory content.
+ */
+typedef struct directory_t
+{
+ /* array of pointers to DIRENTRY_T */
+ apr_array_header_t *entries;
+
+ /* MD5 of the textual representation. Will be set lazily as a side-effect
+ * of determining the length of this dir's textual representation. */
+ unsigned char target_md5[16];
+
+ /* (expanded) length of the textual representation.
+ * Determined lazily during the write process. */
+ apr_size_t size;
+} directory_t;
+
+/* A representation fragment.
+ */
+typedef struct representation_t
+{
+ /* location in the source file */
+ location_t original;
+
+ /* location in the reordered target file */
+ location_t target;
+
+ /* length of the PLAIN / DELTA line in the source file in bytes */
+ apr_size_t header_size;
+
+ /* deltification base, or NULL if there is none */
+ struct representation_t *delta_base;
+
+ /* revision that contains this representation
+ * (may be referenced by other revisions, though) */
+ revision_info_t *revision;
+
+ /* representation content parsed as a directory. This will be NULL, if
+ * *no* directory noderev uses this representation. */
+ directory_t *dir;
+
+ /* the source content has a PLAIN header, so we may simply copy the
+ * source content into the target */
+ svn_boolean_t is_plain;
+
+ /* coloring flag used in the reordering algorithm to keep track of
+ * representations that still need to be placed. */
+ svn_boolean_t covered;
+} representation_t;
+
+/* A node rev.
+ */
+struct noderev_t
+{
+ /* location within the source file */
+ location_t original;
+
+ /* location within the reorganized target file. */
+ location_t target;
+
+ /* predecessor node, or NULL if there is none */
+ noderev_t *predecessor;
+
+ /* content representation; may be NULL if there is none */
+ representation_t *text;
+
+ /* properties representation; may be NULL if there is none */
+ representation_t *props;
+
+ /* revision that this noderev belongs to */
+ revision_info_t *revision;
+
+ /* coloring flag used in the reordering algorithm to keep track of
+ * representations that still need to be placed. */
+ svn_boolean_t covered;
+};
+
+/* Represents a single revision.
+ * There will be only one instance per revision. */
+struct revision_info_t
+{
+ /* number of this revision */
+ svn_revnum_t revision;
+
+ /* position in the source file */
+ revision_location_t original;
+
+ /* position in the reorganized target file */
+ revision_location_t target;
+
+ /* noderev of the root directory */
+ noderev_t *root_noderev;
+
+ /* all noderevs_t of this revision (ordered by source file offset),
+ * i.e. those that point back to this struct */
+ apr_array_header_t *node_revs;
+
+ /* all representation_t of this revision (ordered by source file offset),
+ * i.e. those that point back to this struct */
+ apr_array_header_t *representations;
+};
+
+/* Represents a packed revision file.
+ */
+typedef struct revision_pack_t
+{
+ /* first revision in the pack file */
+ svn_revnum_t base;
+
+ /* revision_info_t* of all revisions in the pack file; in revision order. */
+ apr_array_header_t *info;
+
+ /* list of fragments to place in the target pack file; in target order. */
+ apr_array_header_t *fragments;
+
+ /* source pack file length */
+ apr_size_t filesize;
+
+ /* temporary value. Equal to the number of bytes in the target pack file
+ * already allocated to fragments. */
+ apr_size_t target_offset;
+} revision_pack_t;
+
+/* Cache for revision source content. All content is stored in DATA and
+ * the HASH maps revision number to an svn_string_t instance whose data
+ * member points into DATA.
+ *
+ * Once TOTAL_SIZE exceeds LIMIT, all content will be discarded. Similarly,
+ * the hash gets cleared every 10000 insertions to keep the HASH_POOL
+ * memory usage in check.
+ */
+typedef struct content_cache_t
+{
+ /* pool used for HASH */
+ apr_pool_t *hash_pool;
+
+ /* svn_revnum_t -> svn_string_t.
+ * The strings become (potentially) invalid when adding new cache entries. */
+ apr_hash_t *hash;
+
+ /* data buffer. the first TOTAL_SIZE bytes are actually being used. */
+ char *data;
+
+ /* DATA capacity */
+ apr_size_t limit;
+
+ /* number of bytes used in DATA */
+ apr_size_t total_size;
+
+ /* number of insertions since the last hash cleanup */
+ apr_size_t insert_count;
+} content_cache_t;
+
+/* A cached directory. In contrast to directory_t, this stored the data as
+ * the plain hash that the normal FSFS will use to serialize & diff dirs.
+ */
+typedef struct dir_cache_entry_t
+{
+ /* revision containing the representation */
+ svn_revnum_t revision;
+
+ /* offset of the representation within that revision */
+ apr_size_t offset;
+
+ /* key-value representation of the directory entries */
+ apr_hash_t *hash;
+} dir_cache_entry_t;
+
+/* Directory cache. (revision, offset) will be mapped directly into the
+ * ENTRIES array of ENTRY_COUNT buckets (many entries will be NULL).
+ * Two alternating pools will be used to allocate dir content.
+ *
+ * If the INSERT_COUNT exceeds a given limit, the pools get exchanged and
+ * the older of the two will be cleared. This is to keep dir objects valid
+ * for at least one insertion.
+ */
+typedef struct dir_cache_t
+{
+ /* fixed-size array of ENTRY_COUNT elements */
+ dir_cache_entry_t *entries;
+
+ /* currently used for entry allocations */
+ apr_pool_t *pool1;
+
+ /* previously used for entry allocations */
+ apr_pool_t *pool2;
+
+ /* size of ENTRIES in elements */
+ apr_size_t entry_count;
+
+ /* number of directory elements added. I.e. usually >> #cached dirs */
+ apr_size_t insert_count;
+} dir_cache_t;
+
+/* A cached, undeltified txdelta window.
+ */
+typedef struct window_cache_entry_t
+{
+ /* revision containing the window */
+ svn_revnum_t revision;
+
+ /* offset of the deltified window within that revision */
+ apr_size_t offset;
+
+ /* window content */
+ svn_stringbuf_t *window;
+} window_cache_entry_t;
+
+/* Cache for undeltified txdelta windows. (revision, offset) will be mapped
+ * directly into the ENTRIES array of INSERT_COUNT buckets (most entries
+ * will be NULL).
+ *
+ * The cache will be cleared when USED exceeds CAPACITY.
+ */
+typedef struct window_cache_t
+{
+ /* fixed-size array of ENTRY_COUNT elements */
+ window_cache_entry_t *entries;
+
+ /* used to allocate windows */
+ apr_pool_t *pool;
+
+ /* size of ENTRIES in elements */
+ apr_size_t entry_count;
+
+ /* maximum combined size of all cached windows */
+ apr_size_t capacity;
+
+ /* current combined size of all cached windows */
+ apr_size_t used;
+} window_cache_t;
+
+/* Root data structure containing all information about a given repository.
+ */
+typedef struct fs_fs_t
+{
+ /* repository to reorg */
+ const char *path;
+
+ /* revision to start at (must be 0, ATM) */
+ svn_revnum_t start_revision;
+
+ /* FSFS format number */
+ int format;
+
+ /* highest revision number in the repo */
+ svn_revnum_t max_revision;
+
+ /* first non-packed revision */
+ svn_revnum_t min_unpacked_rev;
+
+ /* sharing size*/
+ int max_files_per_dir;
+
+ /* all revisions */
+ apr_array_header_t *revisions;
+
+ /* all packed files */
+ apr_array_header_t *packs;
+
+ /* empty representation.
+ * Used as a dummy base for DELTA reps without base. */
+ representation_t *null_base;
+
+ /* revision content cache */
+ content_cache_t *cache;
+
+ /* directory hash cache */
+ dir_cache_t *dir_cache;
+
+ /* undeltified txdelta window cache */
+ window_cache_t *window_cache;
+} fs_fs_t;
+
+/* Return the rev pack folder for revision REV in FS.
+ */
+static const char *
+get_pack_folder(fs_fs_t *fs,
+                svn_revnum_t rev,
+                apr_pool_t *pool)
+{
+  /* pack directories are named after the first revision they shard */
+  svn_revnum_t shard = rev / fs->max_files_per_dir;
+
+  return apr_psprintf(pool, "%s/db/revs/%ld.pack", fs->path, shard);
+}
+
+/* Return the path of the file containing revision REV in FS.
+ * Packed revisions live in "<shard>.pack/pack", unpacked ones in
+ * "<shard>/<rev>".  Allocate the result in POOL.
+ */
+static const char *
+rev_or_pack_file_name(fs_fs_t *fs,
+ svn_revnum_t rev,
+ apr_pool_t *pool)
+{
+ /* revisions below MIN_UNPACKED_REV have been packed into shard files */
+ return fs->min_unpacked_rev > rev
+ ? svn_dirent_join(get_pack_folder(fs, rev, pool), "pack", pool)
+ : apr_psprintf(pool, "%s/db/revs/%ld/%ld", fs->path,
+ rev / fs->max_files_per_dir, rev);
+}
+
+/* Open the file containing revision REV in FS and return it in *FILE.
+ * The file is opened read-only and buffered; allocate it in POOL.
+ */
+static svn_error_t *
+open_rev_or_pack_file(apr_file_t **file,
+ fs_fs_t *fs,
+ svn_revnum_t rev,
+ apr_pool_t *pool)
+{
+ return svn_io_file_open(file,
+ rev_or_pack_file_name(fs, rev, pool),
+ APR_READ | APR_BUFFERED,
+ APR_OS_DEFAULT,
+ pool);
+}
+
+/* Read the whole content of the file containing REV in FS and return that
+ * in *CONTENT.  Note that for packed revisions this is the whole pack
+ * file, not just the section pertaining to REV.  Allocate in POOL.
+ */
+static svn_error_t *
+read_rev_or_pack_file(svn_stringbuf_t **content,
+ fs_fs_t *fs,
+ svn_revnum_t rev,
+ apr_pool_t *pool)
+{
+ return svn_stringbuf_from_file2(content,
+ rev_or_pack_file_name(fs, rev, pool),
+ pool);
+}
+
+/* Return a new content cache with the given size LIMIT. Use POOL for
+ * all cache-related allocations.  All cached content shares the single
+ * pre-allocated LIMIT-sized DATA buffer.
+ */
+static content_cache_t *
+create_content_cache(apr_pool_t *pool,
+ apr_size_t limit)
+{
+ content_cache_t *result = apr_pcalloc(pool, sizeof(*result));
+
+ /* the index (hash) lives in its own sub-pool so it can be cleared
+ * cheaply when the cache gets purged */
+ result->hash_pool = svn_pool_create(pool);
+ result->hash = svn_hash__make(result->hash_pool);
+ result->limit = limit;
+ result->total_size = 0;
+ result->insert_count = 0;
+ result->data = apr_palloc(pool, limit);
+
+ return result;
+}
+
+/* Return the content of revision REVISION from CACHE. Return NULL upon a
+ * cache miss. This is a cache-internal function.
+ * The hash key is the revision number itself, hashed by value.
+ */
+static svn_string_t *
+get_cached_content(content_cache_t *cache,
+ svn_revnum_t revision)
+{
+ return apr_hash_get(cache->hash, &revision, sizeof(revision));
+}
+
+/* Take the content in DATA and store it under REVISION in CACHE.
+ * This is a cache-internal function.  DATA gets copied into the cache's
+ * shared buffer; the whole cache is purged when that buffer is full.
+ */
+static void
+set_cached_content(content_cache_t *cache,
+ svn_revnum_t revision,
+ svn_string_t *data)
+{
+ svn_string_t *content;
+ svn_revnum_t *key;
+
+ /* double insertion? -> broken cache logic */
+ assert(get_cached_content(cache, revision) == NULL);
+
+ /* purge the cache upon overflow */
+ if (cache->total_size + data->len > cache->limit)
+ {
+ /* the hash pool grows slowly over time; clear it once in a while */
+ if (cache->insert_count > 10000)
+ {
+ svn_pool_clear(cache->hash_pool);
+ cache->hash = svn_hash__make(cache->hash_pool);
+ cache->insert_count = 0;
+ }
+ else
+ /* drop all index entries but keep the pool to save some cycles */
+ cache->hash = svn_hash__make(cache->hash_pool);
+
+ cache->total_size = 0;
+
+ /* buffer overflow / revision too large */
+ if (data->len > cache->limit)
+ SVN_ERR_MALFUNCTION_NO_RETURN();
+ }
+
+ /* copy data to cache and update the index (hash) */
+ content = apr_palloc(cache->hash_pool, sizeof(*content));
+ content->data = cache->data + cache->total_size;
+ content->len = data->len;
+
+ memcpy(cache->data + cache->total_size, data->data, data->len);
+ cache->total_size += data->len;
+
+ key = apr_palloc(cache->hash_pool, sizeof(*key));
+ *key = revision;
+
+ apr_hash_set(cache->hash, key, sizeof(*key), content);
+ ++cache->insert_count;
+}
+
+/* Get the file content of revision REVISION in FS and return it in *DATA.
+ * The result is shared with the content cache, i.e. it remains valid only
+ * until that cache gets purged.  Use SCRATCH_POOL for temporary
+ * allocations.
+ */
+static svn_error_t *
+get_content(svn_string_t **data,
+ fs_fs_t *fs,
+ svn_revnum_t revision,
+ apr_pool_t *scratch_pool)
+{
+ apr_file_t *file;
+ revision_info_t *revision_info;
+ svn_stringbuf_t *temp;
+ apr_off_t temp_offset;
+
+ /* try getting the data from our cache */
+ svn_string_t *result = get_cached_content(fs->cache, revision);
+ if (result)
+ {
+ *data = result;
+ return SVN_NO_ERROR;
+ }
+
+ /* not in cache. Is the revision valid at all?
+ * Valid array indexes are 0 .. nelts-1, hence ">=" here; the previous
+ * ">" would have allowed a one-past-the-end access below. */
+ if (revision - fs->start_revision >= fs->revisions->nelts)
+ return svn_error_createf(SVN_ERR_BAD_VERSION_FILE_FORMAT, NULL,
+ _("Unknown revision %ld"), revision);
+ revision_info = APR_ARRAY_IDX(fs->revisions,
+ revision - fs->start_revision,
+ revision_info_t*);
+
+ /* read the revision content. Assume that the file has *not* been
+ * reorg'ed, yet, i.e. all data is in one place. */
+ temp = svn_stringbuf_create_ensure( revision_info->original.end
+ - revision_info->original.offset,
+ scratch_pool);
+ temp->len = revision_info->original.end - revision_info->original.offset;
+ SVN_ERR(open_rev_or_pack_file(&file, fs, revision, scratch_pool));
+
+ /* seek to where REVISION's data starts within the (possibly packed)
+ * container file */
+ temp_offset = revision_info->original.offset;
+ SVN_ERR(svn_io_file_seek(file, APR_SET, &temp_offset,
+ scratch_pool));
+ SVN_ERR_ASSERT(temp_offset < APR_SIZE_MAX);
+ revision_info->original.offset = (apr_size_t)temp_offset;
+ SVN_ERR(svn_io_file_read(file, temp->data, &temp->len, scratch_pool));
+
+ /* cache the result and return it */
+ set_cached_content(fs->cache, revision,
+ svn_stringbuf__morph_into_string(temp));
+ *data = get_cached_content(fs->cache, revision);
+
+ return SVN_NO_ERROR;
+}
+
+/* Return a new directory cache with ENTRY_COUNT buckets in its index.
+ * Use POOL for all cache-related allocations.
+ * Cached hashes are allocated alternately from POOL1 and POOL2, which
+ * get rotated and cleared at regular intervals (see set_cached_dir).
+ */
+static dir_cache_t *
+create_dir_cache(apr_pool_t *pool,
+ apr_size_t entry_count)
+{
+ dir_cache_t *result = apr_pcalloc(pool, sizeof(*result));
+
+ result->pool1 = svn_pool_create(pool);
+ result->pool2 = svn_pool_create(pool);
+ result->entry_count = entry_count;
+ result->insert_count = 0;
+ result->entries = apr_pcalloc(pool, sizeof(*result->entries) * entry_count);
+
+ return result;
+}
+
+/* Return the position within FS' dir cache ENTRIES index for the given
+ * (REVISION, OFFSET) pair. This is a cache-internal function.
+ */
+static apr_size_t
+get_dir_cache_index(fs_fs_t *fs,
+ svn_revnum_t revision,
+ apr_size_t offset)
+{
+ /* simple multiplicative hash, folded into the bucket range */
+ return (revision + offset * 0xd1f3da69) % fs->dir_cache->entry_count;
+}
+
+/* Return the currently active pool of FS' dir cache. Note that it may be
+ * cleared after *2* insertions (i.e. after two pool-rotation intervals in
+ * set_cached_dir -- presumably why two pools are kept; verify there).
+ */
+static apr_pool_t *
+get_cached_dir_pool(fs_fs_t *fs)
+{
+ return fs->dir_cache->pool1;
+}
+
+/* Return the cached directory content stored in REPRESENTATION within FS.
+ * If that has not been found in cache, return NULL.
+ * The bucket is selected by (revision, offset); a hit requires both
+ * fields of the entry to match exactly.
+ */
+static apr_hash_t *
+get_cached_dir(fs_fs_t *fs,
+ representation_t *representation)
+{
+ svn_revnum_t revision = representation->revision->revision;
+ apr_size_t offset = representation->original.offset;
+
+ apr_size_t i = get_dir_cache_index(fs, revision, offset);
+ dir_cache_entry_t *entry = &fs->dir_cache->entries[i];
+
+ return entry->offset == offset && entry->revision == revision
+ ? entry->hash
+ : NULL;
+}
+
+/* Cache the directory HASH for REPRESENTATION within FS.
+ * HASH must have been allocated in the pool returned by
+ * get_cached_dir_pool.
+ */
+static void
+set_cached_dir(fs_fs_t *fs,
+ representation_t *representation,
+ apr_hash_t *hash)
+{
+ /* select the entry to use */
+ svn_revnum_t revision = representation->revision->revision;
+ apr_size_t offset = representation->original.offset;
+
+ apr_size_t i = get_dir_cache_index(fs, revision, offset);
+ dir_cache_entry_t *entry = &fs->dir_cache->entries[i];
+
+ /* clean the cache and rotate pools at regular intervals */
+ fs->dir_cache->insert_count += apr_hash_count(hash);
+ if (fs->dir_cache->insert_count >= fs->dir_cache->entry_count * 100)
+ {
+ apr_pool_t *pool;
+
+ /* POOL2 holds the entries from the *previous* interval; clear it and
+ * invalidate the whole index, then swap the pools so the entries
+ * just inserted survive one more interval. */
+ svn_pool_clear(fs->dir_cache->pool2);
+ memset(fs->dir_cache->entries,
+ 0,
+ sizeof(*fs->dir_cache->entries) * fs->dir_cache->entry_count);
+ fs->dir_cache->insert_count = 0;
+
+ pool = fs->dir_cache->pool2;
+ fs->dir_cache->pool2 = fs->dir_cache->pool1;
+ fs->dir_cache->pool1 = pool;
+ }
+
+ /* write data to cache */
+ entry->hash = hash;
+ entry->offset = offset;
+ entry->revision = revision;
+}
+
+/* Return a new txdelta window cache with ENTRY_COUNT buckets in its index
+ * and the total CAPACITY given in bytes.
+ * Use POOL for all cache-related allocations.
+ */
+static window_cache_t *
+create_window_cache(apr_pool_t *pool,
+ apr_size_t entry_count,
+ apr_size_t capacity)
+{
+ window_cache_t *result = apr_pcalloc(pool, sizeof(*result));
+
+ /* window copies are allocated in a sub-pool so the whole cache can be
+ * emptied with a single pool clear */
+ result->pool = svn_pool_create(pool);
+ result->entry_count = entry_count;
+ result->capacity = capacity;
+ result->used = 0;
+ result->entries = apr_pcalloc(pool, sizeof(*result->entries) * entry_count);
+
+ return result;
+}
+
+/* Return the position within FS' window cache ENTRIES index for the given
+ * (REVISION, OFFSET) pair. This is a cache-internal function.
+ */
+static apr_size_t
+get_window_cache_index(fs_fs_t *fs,
+ svn_revnum_t revision,
+ apr_size_t offset)
+{
+ /* same multiplicative hash scheme as the dir cache */
+ return (revision + offset * 0xd1f3da69) % fs->window_cache->entry_count;
+}
+
+/* Return the cached txdelta window stored in REPRESENTATION within FS.
+ * If that has not been found in cache, return NULL.
+ * On a hit, return a private copy allocated in POOL so the caller does
+ * not depend on the cache's lifetime.
+ */
+static svn_stringbuf_t *
+get_cached_window(fs_fs_t *fs,
+ representation_t *representation,
+ apr_pool_t *pool)
+{
+ svn_revnum_t revision = representation->revision->revision;
+ apr_size_t offset = representation->original.offset;
+
+ apr_size_t i = get_window_cache_index(fs, revision, offset);
+ window_cache_entry_t *entry = &fs->window_cache->entries[i];
+
+ return entry->offset == offset && entry->revision == revision
+ ? svn_stringbuf_dup(entry->window, pool)
+ : NULL;
+}
+
+/* Cache the undeltified txdelta WINDOW for REPRESENTATION within FS.
+ * A private copy of WINDOW is stored; the whole cache gets emptied when
+ * the combined size of cached windows would exceed the capacity.
+ */
+static void
+set_cached_window(fs_fs_t *fs,
+ representation_t *representation,
+ svn_stringbuf_t *window)
+{
+ /* select entry */
+ svn_revnum_t revision = representation->revision->revision;
+ apr_size_t offset = representation->original.offset;
+
+ apr_size_t i = get_window_cache_index(fs, revision, offset);
+ window_cache_entry_t *entry = &fs->window_cache->entries[i];
+
+ /* if the capacity is exceeded, clear the cache */
+ fs->window_cache->used += window->len;
+ if (fs->window_cache->used >= fs->window_cache->capacity)
+ {
+ svn_pool_clear(fs->window_cache->pool);
+ memset(fs->window_cache->entries,
+ 0,
+ sizeof(*fs->window_cache->entries) * fs->window_cache->entry_count);
+ /* account for the window about to be inserted below */
+ fs->window_cache->used = window->len;
+ }
+
+ /* set the entry to a copy of the window data */
+ entry->window = svn_stringbuf_dup(window, fs->window_cache->pool);
+ entry->offset = offset;
+ entry->revision = revision;
+}
+
+/* Given rev pack PATH in FS, read the manifest file and return the offsets
+ * in *MANIFEST (one apr_size_t per revision in the pack).
+ * Use POOL for allocations.
+ */
+static svn_error_t *
+read_manifest(apr_array_header_t **manifest,
+ fs_fs_t *fs,
+ const char *path,
+ apr_pool_t *pool)
+{
+ svn_stream_t *manifest_stream;
+ apr_pool_t *iterpool;
+
+ /* Open the manifest file. */
+ SVN_ERR(svn_stream_open_readonly(&manifest_stream,
+ svn_dirent_join(path, "manifest", pool),
+ pool, pool));
+
+ /* While we're here, let's just read the entire manifest file into an array,
+ so we can cache the entire thing. */
+ iterpool = svn_pool_create(pool);
+ *manifest = apr_array_make(pool, fs->max_files_per_dir, sizeof(apr_size_t));
+ while (1)
+ {
+ svn_stringbuf_t *sb;
+ svn_boolean_t eof;
+ apr_uint64_t val;
+ svn_error_t *err;
+
+ svn_pool_clear(iterpool);
+ SVN_ERR(svn_stream_readline(manifest_stream, &sb, "\n", &eof, iterpool));
+ if (eof)
+ break;
+
+ /* each line is one decimal offset; reject values > APR_SIZE_MAX */
+ err = svn_cstring_strtoui64(&val, sb->data, 0, APR_SIZE_MAX, 10);
+ if (err)
+ return svn_error_createf(SVN_ERR_FS_CORRUPT, err,
+ _("Manifest offset '%s' too large"),
+ sb->data);
+ APR_ARRAY_PUSH(*manifest, apr_size_t) = (apr_size_t)val;
+ }
+ svn_pool_destroy(iterpool);
+
+ return svn_stream_close(manifest_stream);
+}
+
+/* Read header information for the revision stored in FILE_CONTENT between
+ * offsets START and END.  The "header" is the final line of the revision,
+ * "<root-noderev-offset> <changes-offset>\n".  Return the offsets within
+ * FILE_CONTENT for the *ROOT_NODEREV, the list of *CHANGES and its len in
+ * *CHANGES_LEN.
+ * Use POOL for temporary allocations. */
+static svn_error_t *
+read_revision_header(apr_size_t *changes,
+ apr_size_t *changes_len,
+ apr_size_t *root_noderev,
+ svn_stringbuf_t *file_content,
+ apr_size_t start,
+ apr_size_t end,
+ apr_pool_t *pool)
+{
+ char buf[64];
+ const char *line;
+ char *space;
+ apr_uint64_t val;
+ apr_size_t len;
+
+ /* Read in this last block, from which we will identify the last line. */
+ len = sizeof(buf);
+ if (start + len > end)
+ len = end - start;
+
+ memcpy(buf, file_content->data + end - len, len);
+
+ /* The last byte should be a newline. */
+ if (buf[(apr_ssize_t)len - 1] != '\n')
+ return svn_error_create(SVN_ERR_FS_CORRUPT, NULL,
+ _("Revision lacks trailing newline"));
+
+ /* Look for the next previous newline. */
+ buf[len - 1] = 0;
+ line = strrchr(buf, '\n');
+ if (line == NULL)
+ return svn_error_create(SVN_ERR_FS_CORRUPT, NULL,
+ _("Final line in revision file longer "
+ "than 64 characters"));
+
+ space = strchr(line, ' ');
+ if (space == NULL)
+ return svn_error_create(SVN_ERR_FS_CORRUPT, NULL,
+ _("Final line in revision file missing space"));
+
+ /* terminate the header line */
+ *space = 0;
+
+ /* extract information: LINE+1 skips the '\n', SPACE+1 skips the ' ' */
+ SVN_ERR(svn_cstring_strtoui64(&val, line+1, 0, APR_SIZE_MAX, 10));
+ *root_noderev = (apr_size_t)val;
+ SVN_ERR(svn_cstring_strtoui64(&val, space+1, 0, APR_SIZE_MAX, 10));
+ *changes = (apr_size_t)val;
+ /* changes run from *CHANGES up to (but excluding) the header line */
+ *changes_len = end - *changes - start - (buf + len - line) + 1;
+
+ return SVN_NO_ERROR;
+}
+
+/* Read the FSFS format number and sharding size from the format file at
+ * PATH and return it in *PFORMAT and *MAX_FILES_PER_DIR respectively.
+ * Use POOL for temporary allocations.
+ */
+static svn_error_t *
+read_format(int *pformat, int *max_files_per_dir,
+ const char *path, apr_pool_t *pool)
+{
+ svn_error_t *err;
+ apr_file_t *file;
+ char buf[80];
+ apr_size_t len;
+
+ /* open format file and read the first line */
+ err = svn_io_file_open(&file, path, APR_READ | APR_BUFFERED,
+ APR_OS_DEFAULT, pool);
+ if (err && APR_STATUS_IS_ENOENT(err->apr_err))
+ {
+ /* Treat an absent format file as format 1. Do not try to
+ create the format file on the fly, because the repository
+ might be read-only for us, or this might be a read-only
+ operation, and the spirit of FSFS is to make no changes
+ whatsoever in read-only operations. See thread starting at
+ http://subversion.tigris.org/servlets/ReadMsg?list=dev&msgNo=97600
+ for more. */
+ svn_error_clear(err);
+ *pformat = 1;
+ *max_files_per_dir = 0;
+
+ return SVN_NO_ERROR;
+ }
+ SVN_ERR(err);
+
+ len = sizeof(buf);
+ err = svn_io_read_length_line(file, buf, &len, pool);
+ if (err && APR_STATUS_IS_EOF(err->apr_err))
+ {
+ /* Return a more useful error message. */
+ svn_error_clear(err);
+ return svn_error_createf(SVN_ERR_BAD_VERSION_FILE_FORMAT, NULL,
+ _("Can't read first line of format file '%s'"),
+ svn_dirent_local_style(path, pool));
+ }
+ SVN_ERR(err);
+
+ /* Check that the first line contains only digits. */
+ SVN_ERR(svn_cstring_atoi(pformat, buf));
+
+ /* Set the default values for anything that can be set via an option. */
+ *max_files_per_dir = 0;
+
+ /* Read any options, one "keyword value" pair per line. */
+ while (1)
+ {
+ len = sizeof(buf);
+ err = svn_io_read_length_line(file, buf, &len, pool);
+ if (err && APR_STATUS_IS_EOF(err->apr_err))
+ {
+ /* No more options; that's okay. */
+ svn_error_clear(err);
+ break;
+ }
+ SVN_ERR(err);
+
+ if (strncmp(buf, "layout ", 7) == 0)
+ {
+ if (strcmp(buf+7, "linear") == 0)
+ {
+ *max_files_per_dir = 0;
+ continue;
+ }
+
+ if (strncmp(buf+7, "sharded ", 8) == 0)
+ {
+ /* Check that the argument is numeric.
+ * (buf + 15 points right after "layout sharded ") */
+ SVN_ERR(svn_cstring_atoi(max_files_per_dir, buf + 15));
+ continue;
+ }
+ }
+
+ return svn_error_createf(SVN_ERR_BAD_VERSION_FILE_FORMAT, NULL,
+ _("'%s' contains invalid filesystem format option '%s'"),
+ svn_dirent_local_style(path, pool), buf);
+ }
+
+ return svn_io_file_close(file, pool);
+}
+
+/* Read a decimal revision number from the first line of the file at PATH
+ * and return it in *RESULT.
+ * Use POOL for temporary allocations.
+ */
+static svn_error_t *
+read_number(svn_revnum_t *result, const char *path, apr_pool_t *pool)
+{
+ svn_stringbuf_t *content;
+ apr_uint64_t number;
+
+ SVN_ERR(svn_stringbuf_from_file2(&content, path, pool));
+
+ /* replace the trailing newline with a NUL terminator before parsing */
+ content->data[content->len-1] = 0;
+ SVN_ERR(svn_cstring_strtoui64(&number, content->data, 0, LONG_MAX, 10));
+ *result = (svn_revnum_t)number;
+
+ return SVN_NO_ERROR;
+}
+
+/* Create *FS for the repository at PATH and read the format and size info.
+ * Only FSFS formats 4 and 6 are supported.
+ * Use POOL for temporary allocations.
+ */
+static svn_error_t *
+fs_open(fs_fs_t **fs, const char *path, apr_pool_t *pool)
+{
+ *fs = apr_pcalloc(pool, sizeof(**fs));
+ (*fs)->path = apr_pstrdup(pool, path);
+ /* default sharding size; overwritten by read_format below if the
+ * format file specifies a layout */
+ (*fs)->max_files_per_dir = 1000;
+
+ /* Read the FS format number. */
+ SVN_ERR(read_format(&(*fs)->format,
+ &(*fs)->max_files_per_dir,
+ svn_dirent_join(path, "db/format", pool),
+ pool));
+ if (((*fs)->format != 4) && ((*fs)->format != 6))
+ return svn_error_create(SVN_ERR_FS_UNSUPPORTED_FORMAT, NULL, NULL);
+
+ /* read size (HEAD) info */
+ SVN_ERR(read_number(&(*fs)->min_unpacked_rev,
+ svn_dirent_join(path, "db/min-unpacked-rev", pool),
+ pool));
+ return read_number(&(*fs)->max_revision,
+ svn_dirent_join(path, "db/current", pool),
+ pool);
+}
+
+/* Utility function that returns true if STRING->DATA matches KEY
+ * (exact, case-sensitive comparison).
+ */
+static svn_boolean_t
+key_matches(svn_string_t *string, const char *key)
+{
+ return strcmp(string->data, key) == 0;
+}
+
+/* Comparator used for binary search comparing the absolute file offset
+ * of a noderev to some other offset. DATA is a *noderev_t, KEY is pointer
+ * to an apr_size_t.  Returns -1 / 0 / +1.
+ */
+static int
+compare_noderev_offsets(const void *data, const void *key)
+{
+ apr_ssize_t diff = (*(const noderev_t *const *)data)->original.offset
+ - *(const apr_size_t *)key;
+
+ /* sizeof(int) may be < sizeof(ssize_t), so reduce DIFF to a sign
+ * instead of returning it directly */
+ if (diff < 0)
+ return -1;
+ return diff > 0 ? 1 : 0;
+}
+
+/* Get the revision and offset info from the node ID with FS. Return the
+ * data as *REVISION_INFO and *OFFSET, respectively.
+ *
+ * Note that we assume that the revision_info_t object ID's revision has
+ * already been created. That can be guaranteed for standard FSFS pack
+ * files as IDs never point to future revisions.
+ */
+static svn_error_t *
+parse_revnode_pos(revision_info_t **revision_info,
+ apr_size_t *offset,
+ fs_fs_t *fs,
+ svn_string_t *id)
+{
+ int revision;
+ apr_uint64_t temp;
+
+ /* split the ID and verify the format */
+ const char *revision_pos = strrchr(id->data, 'r');
+ char *offset_pos = (char *)strchr(id->data, '/');
+
+ if (revision_pos == NULL || offset_pos == NULL)
+ return svn_error_createf(SVN_ERR_BAD_VERSION_FILE_FORMAT, NULL,
+ _("Invalid node id '%s'"), id->data);
+
+ /* extract the numbers (temp. modifying the ID)*/
+ *offset_pos = 0;
+ SVN_ERR(svn_cstring_atoi(&revision, revision_pos + 1));
+ SVN_ERR(svn_cstring_strtoui64(&temp, offset_pos + 1, 0, APR_SIZE_MAX, 10));
+ *offset = (apr_size_t)temp;
+ *offset_pos = '/';
+
+ /* validate the revision number and return the revision info.
+ * Valid indexes into FS->REVISIONS are 0 .. nelts-1, hence ">=" here;
+ * the previous ">" would have allowed a one-past-the-end access. */
+ if (revision - fs->start_revision >= fs->revisions->nelts)
+ return svn_error_createf(SVN_ERR_BAD_VERSION_FILE_FORMAT, NULL,
+ _("Unknown revision %d"), revision);
+
+ *revision_info = APR_ARRAY_IDX(fs->revisions,
+ revision - fs->start_revision,
+ revision_info_t*);
+
+ return SVN_NO_ERROR;
+}
+
+/* Returns in *RESULT the noderev at OFFSET relative the revision given in
+ * REVISION_INFO. If no such noderev has been parsed, yet, error out.
+ *
+ * Since we require the noderev to already have been parsed, we can use
+ * this functions only to access "older", i.e. predecessor noderevs.
+ */
+static svn_error_t *
+find_noderev(noderev_t **result,
+ revision_info_t *revision_info,
+ apr_size_t offset)
+{
+ /* NODE_REVS is kept sorted by offset, so binary search applies */
+ int idx = svn_sort__bsearch_lower_bound(&offset,
+ revision_info->node_revs,
+ compare_noderev_offsets);
+ if ((idx < 0) || (idx >= revision_info->node_revs->nelts))
+ return svn_error_createf(SVN_ERR_BAD_VERSION_FILE_FORMAT, NULL,
+ _("No noderev found at offset %ld"),
+ (long)offset);
+
+ /* lower-bound may return the next-larger offset; demand an exact hit */
+ *result = APR_ARRAY_IDX(revision_info->node_revs, idx, noderev_t *);
+ if ((*result)->original.offset != offset)
+ return svn_error_createf(SVN_ERR_BAD_VERSION_FILE_FORMAT, NULL,
+ _("No noderev found at offset %ld"),
+ (long)offset);
+
+ return SVN_NO_ERROR;
+}
+
+/* In *RESULT, return the noderev given by ID in FS. The noderev must
+ * already have been parsed and put into the FS data structures.
+ * Thin wrapper combining parse_revnode_pos and find_noderev.
+ */
+static svn_error_t *
+parse_pred(noderev_t **result,
+ fs_fs_t *fs,
+ svn_string_t *id)
+{
+ apr_size_t offset;
+ revision_info_t *revision_info;
+
+ SVN_ERR(parse_revnode_pos(&revision_info, &offset, fs, id));
+ SVN_ERR(find_noderev(result, revision_info, offset));
+
+ return SVN_NO_ERROR;
+}
+
+/* Comparator used for binary search comparing the absolute file offset
+ * of a representation to some other offset. DATA is a *representation_t,
+ * KEY is a pointer to an apr_size_t.  Returns -1 / 0 / +1.
+ */
+static int
+compare_representation_offsets(const void *data, const void *key)
+{
+ apr_ssize_t diff = (*(const representation_t *const *)data)->original.offset
+ - *(const apr_size_t *)key;
+
+ /* sizeof(int) may be < sizeof(ssize_t), so reduce DIFF to a sign
+ * instead of returning it directly */
+ if (diff < 0)
+ return -1;
+ return diff > 0 ? 1 : 0;
+}
+
+/* Find the revision_info_t object to the given REVISION in FS and return
+ * it in *REVISION_INFO. For performance reasons, we skip the lookup if
+ * the info is already provided.
+ *
+ * In that revision, look for the representation_t object for offset OFFSET.
+ * If it already exists, set *idx to its index in *REVISION_INFO's
+ * representations list and return the representation object. Otherwise,
+ * set the index to where it must be inserted and return NULL.
+ */
+static representation_t *
+find_representation(int *idx,
+ fs_fs_t *fs,
+ revision_info_t **revision_info,
+ int revision,
+ apr_size_t offset)
+{
+ revision_info_t *info;
+ *idx = -1;
+
+ /* first let's find the revision */
+ info = revision_info ? *revision_info : NULL;
+ if (info == NULL || info->revision != revision)
+ {
+ info = APR_ARRAY_IDX(fs->revisions,
+ revision - fs->start_revision,
+ revision_info_t*);
+ /* update the caller's cached revision info, if provided */
+ if (revision_info)
+ *revision_info = info;
+ }
+
+ /* not found -> no result */
+ if (info == NULL)
+ return NULL;
+
+ assert(revision == info->revision);
+
+ /* look for the representation (REPRESENTATIONS is sorted by offset) */
+ *idx = svn_sort__bsearch_lower_bound(&offset,
+ info->representations,
+ compare_representation_offsets);
+ if (*idx < info->representations->nelts)
+ {
+ /* return the representation, if this is the one we were looking for */
+ representation_t *result
+ = APR_ARRAY_IDX(info->representations, *idx, representation_t *);
+ if (result->original.offset == offset)
+ return result;
+ }
+
+ /* not parsed, yet */
+ return NULL;
+}
+
+/* Read the representation header in FILE_CONTENT at OFFSET. Return its
+ * size in *HEADER_SIZE, set *IS_PLAIN if no deltification was used and
+ * return the deltification base representation in *REPRESENTATION. If
+ * there is none, set it to NULL. Use FS to it look up.
+ *
+ * The header is a single line: "PLAIN", "DELTA", or
+ * "DELTA <rev> <offset> <size>".
+ * Use SCRATCH_POOL for temporary allocations.
+ */
+static svn_error_t *
+read_rep_base(representation_t **representation,
+ apr_size_t *header_size,
+ svn_boolean_t *is_plain,
+ fs_fs_t *fs,
+ svn_stringbuf_t *file_content,
+ apr_size_t offset,
+ apr_pool_t *scratch_pool)
+{
+ char *str, *last_str;
+ int idx, revision;
+ apr_uint64_t temp;
+
+ /* identify representation header (1 line) */
+ const char *buffer = file_content->data + offset;
+ const char *line_end = strchr(buffer, '\n');
+ *header_size = line_end - buffer + 1;
+
+ /* check for PLAIN rep */
+ if (strncmp(buffer, "PLAIN\n", *header_size) == 0)
+ {
+ *is_plain = TRUE;
+ *representation = NULL;
+ return SVN_NO_ERROR;
+ }
+
+ /* check for DELTA against empty rep */
+ *is_plain = FALSE;
+ if (strncmp(buffer, "DELTA\n", *header_size) == 0)
+ {
+ /* This is a delta against the empty stream. */
+ *representation = fs->null_base;
+ return SVN_NO_ERROR;
+ }
+
+ /* it's delta against some other rep. Duplicate the header info such
+ * that we may modify it during parsing. */
+ str = apr_pstrndup(scratch_pool, buffer, line_end - buffer);
+ last_str = str;
+
+ /* parse it. */
+ /* skip the "DELTA" keyword, then read base revision and base offset */
+ str = svn_cstring_tokenize(" ", &last_str);
+ str = svn_cstring_tokenize(" ", &last_str);
+ SVN_ERR(svn_cstring_atoi(&revision, str));
+
+ str = svn_cstring_tokenize(" ", &last_str);
+ SVN_ERR(svn_cstring_strtoui64(&temp, str, 0, APR_SIZE_MAX, 10));
+
+ /* it should refer to a rep in an earlier revision. Look it up */
+ *representation = find_representation(&idx, fs, NULL, revision, (apr_size_t)temp);
+ return SVN_NO_ERROR;
+}
+
+/* Parse the representation reference (text: or props:) in VALUE, look
+ * it up in FS and return it in *REPRESENTATION. To be able to parse the
+ * base rep, we pass the FILE_CONTENT as well.
+ *
+ * VALUE starts with "<revision> <offset> <size> ...".
+ * If necessary, allocate the result in POOL; use SCRATCH_POOL for temp.
+ * allocations.
+ */
+static svn_error_t *
+parse_representation(representation_t **representation,
+ fs_fs_t *fs,
+ svn_stringbuf_t *file_content,
+ svn_string_t *value,
+ revision_info_t *revision_info,
+ apr_pool_t *pool,
+ apr_pool_t *scratch_pool)
+{
+ representation_t *result;
+ int revision;
+
+ apr_uint64_t offset;
+ apr_uint64_t size;
+ int idx;
+
+ /* read location (revision, offset) and size */
+ char *c = (char *)value->data;
+ SVN_ERR(svn_cstring_atoi(&revision, svn_cstring_tokenize(" ", &c)));
+ SVN_ERR(svn_cstring_strtoui64(&offset, svn_cstring_tokenize(" ", &c), 0, APR_SIZE_MAX, 10));
+ SVN_ERR(svn_cstring_strtoui64(&size, svn_cstring_tokenize(" ", &c), 0, APR_SIZE_MAX, 10));
+
+ /* look it up */
+ result = find_representation(&idx, fs, &revision_info, revision, (apr_size_t)offset);
+ if (!result)
+ {
+ /* not parsed, yet (probably a rep in the same revision).
+ * Create a new rep object and determine its base rep as well.
+ */
+ result = apr_pcalloc(pool, sizeof(*result));
+ result->revision = revision_info;
+ result->original.offset = (apr_size_t)offset;
+ result->original.size = (apr_size_t)size;
+ /* the rep header lives at the rep's offset relative to the start of
+ * the (possibly packed) revision data */
+ SVN_ERR(read_rep_base(&result->delta_base, &result->header_size,
+ &result->is_plain, fs, file_content,
+ (apr_size_t)offset + revision_info->original.offset,
+ scratch_pool));
+
+ /* keep REPRESENTATIONS sorted by offset (IDX from the lookup above) */
+ svn_sort__array_insert(&result, revision_info->representations, idx);
+ }
+
+ *representation = result;
+
+ return SVN_NO_ERROR;
+}
+
+/* Read the delta window contents of all windows in REPRESENTATION in FS.
+ * Return the data as svn_txdelta_window_t* instances in *WINDOWS.
+ * Use POOL for allocations.
+ */
+static svn_error_t *
+read_windows(apr_array_header_t **windows,
+ fs_fs_t *fs,
+ representation_t *representation,
+ apr_pool_t *pool)
+{
+ svn_string_t *content;
+ svn_string_t data;
+ svn_stream_t *stream;
+ apr_size_t offset = representation->original.offset
+ + representation->header_size;
+ char version;
+ apr_size_t len = sizeof(version);
+
+ *windows = apr_array_make(pool, 0, sizeof(svn_txdelta_window_t *));
+
+ /* get the whole revision content */
+ SVN_ERR(get_content(&content, fs, representation->revision->revision, pool));
+
+ /* create a read stream and position it directly after the rep header.
+ * "+ 3" / "- 3" skip the 3-byte "SVN" svndiff signature; the next byte
+ * (read below) is the svndiff version. */
+ data.data = content->data + offset + 3;
+ data.len = representation->original.size - 3;
+ stream = svn_stream_from_string(&data, pool);
+ SVN_ERR(svn_stream_read(stream, &version, &len));
+
+ /* read the windows from that stream */
+ while (TRUE)
+ {
+ svn_txdelta_window_t *window;
+ svn_stream_mark_t *mark;
+ char dummy;
+
+ /* probe one byte to detect EOF, then rewind to the window start */
+ len = sizeof(dummy);
+ SVN_ERR(svn_stream_mark(stream, &mark, pool));
+ SVN_ERR(svn_stream_read(stream, &dummy, &len));
+ if (len == 0)
+ break;
+
+ SVN_ERR(svn_stream_seek(stream, mark));
+ SVN_ERR(svn_txdelta_read_svndiff_window(&window, stream, version, pool));
+ APR_ARRAY_PUSH(*windows, svn_txdelta_window_t *) = window;
+ }
+
+ return SVN_NO_ERROR;
+}
+
+/* Read the content of the PLAIN REPRESENTATION in FS and return it in
+ * *CONTENT. Use POOL for allocations.
+ */
+static svn_error_t *
+read_plain(svn_stringbuf_t **content,
+ fs_fs_t *fs,
+ representation_t *representation,
+ apr_pool_t *pool)
+{
+ svn_string_t *data;
+ /* skip the "PLAIN\n" rep header */
+ apr_size_t offset = representation->original.offset
+ + representation->header_size;
+
+ SVN_ERR(get_content(&data, fs, representation->revision->revision, pool));
+
+ /* content is stored as fulltext already */
+ *content = svn_stringbuf_ncreate(data->data + offset,
+ representation->original.size,
+ pool);
+
+ return SVN_NO_ERROR;
+}
+
+/* Get the undeltified representation that is a result of combining all
+ * deltas from the current desired REPRESENTATION in FS with its base
+ * representation. Store the result in *CONTENT.
+ * Recurses over the delta-base chain; results get cached per rep.
+ * Use POOL for allocations. */
+static svn_error_t *
+get_combined_window(svn_stringbuf_t **content,
+ fs_fs_t *fs,
+ representation_t *representation,
+ apr_pool_t *pool)
+{
+ int i;
+ apr_array_header_t *windows;
+ svn_stringbuf_t *base_content, *result;
+ const char *source;
+ apr_pool_t *sub_pool;
+ apr_pool_t *iter_pool;
+
+ /* special case: no un-deltification necessary */
+ if (representation->is_plain)
+ return read_plain(content, fs, representation, pool);
+
+ /* special case: data already in cache */
+ *content = get_cached_window(fs, representation, pool);
+ if (*content)
+ return SVN_NO_ERROR;
+
+ /* read the delta windows for this representation */
+ sub_pool = svn_pool_create(pool);
+ iter_pool = svn_pool_create(pool);
+ SVN_ERR(read_windows(&windows, fs, representation, sub_pool));
+
+ /* fetch the / create a base content */
+ if (representation->delta_base && representation->delta_base->revision)
+ SVN_ERR(get_combined_window(&base_content, fs,
+ representation->delta_base, sub_pool));
+ else
+ /* no base or the NULL_BASE dummy: delta against the empty stream */
+ base_content = svn_stringbuf_create_empty(sub_pool);
+
+ /* apply deltas */
+ result = svn_stringbuf_create_empty(pool);
+ source = base_content->data;
+
+ for (i = 0; i < windows->nelts; ++i)
+ {
+ svn_txdelta_window_t *window
+ = APR_ARRAY_IDX(windows, i, svn_txdelta_window_t *);
+ svn_stringbuf_t *buf
+ = svn_stringbuf_create_ensure(window->tview_len, iter_pool);
+
+ buf->len = window->tview_len;
+ svn_txdelta_apply_instructions(window, window->src_ops ? source : NULL,
+ buf->data, &buf->len);
+
+ svn_stringbuf_appendbytes(result, buf->data, buf->len);
+ /* each window consumes the next SVIEW_LEN bytes of the base */
+ source += window->sview_len;
+
+ svn_pool_clear(iter_pool);
+ }
+
+ svn_pool_destroy(iter_pool);
+ svn_pool_destroy(sub_pool);
+
+ /* cache result and return it */
+ set_cached_window(fs, representation, result);
+ *content = result;
+
+ return SVN_NO_ERROR;
+}
+
+/* forward declaration (needed because read_noderev and the functions
+ * below are mutually recursive); see the definition for docs */
+static svn_error_t *
+read_noderev(noderev_t **noderev,
+ fs_fs_t *fs,
+ svn_stringbuf_t *file_content,
+ apr_size_t offset,
+ revision_info_t *revision_info,
+ apr_pool_t *pool,
+ apr_pool_t *scratch_pool);
+
+/* Get the noderev at OFFSET in FILE_CONTENT in FS. The file content must
+ * pertain to the revision given in REVISION_INFO. If the data has not
+ * been read yet, parse it and store it in REVISION_INFO. Return the result
+ * in *NODEREV.
+ *
+ * Use POOL for allocations and SCRATCH_POOL for temporaries.
+ */
+static svn_error_t *
+get_noderev(noderev_t **noderev,
+ fs_fs_t *fs,
+ svn_stringbuf_t *file_content,
+ apr_size_t offset,
+ revision_info_t *revision_info,
+ apr_pool_t *pool,
+ apr_pool_t *scratch_pool)
+{
+ /* NODE_REVS is kept sorted by offset, so binary search applies */
+ int idx = svn_sort__bsearch_lower_bound(&offset,
+ revision_info->node_revs,
+ compare_noderev_offsets);
+ if ((idx < 0) || (idx >= revision_info->node_revs->nelts))
+ SVN_ERR(read_noderev(noderev, fs, file_content, offset, revision_info,
+ pool, scratch_pool));
+ else
+ {
+ /* lower-bound may be the next-larger offset; parse on inexact hit */
+ *noderev = APR_ARRAY_IDX(revision_info->node_revs, idx, noderev_t *);
+ if ((*noderev)->original.offset != offset)
+ SVN_ERR(read_noderev(noderev, fs, file_content, offset, revision_info,
+ pool, scratch_pool));
+ }
+
+ return SVN_NO_ERROR;
+}
+
+/* Read the directory stored in REPRESENTATION in FS into *HASH. The result
+ * will be allocated in FS' directory cache and it will be plain key-value
+ * hash. Use SCRATCH_POOL for temporary allocations.
+ * NOTE: the result's lifetime is bound to the dir cache's pool rotation.
+ */
+static svn_error_t *
+read_dir(apr_hash_t **hash,
+ fs_fs_t *fs,
+ representation_t *representation,
+ apr_pool_t *scratch_pool)
+{
+ svn_stringbuf_t *text;
+ apr_pool_t *text_pool;
+ svn_stream_t *stream;
+ apr_pool_t *pool;
+
+ /* chances are, we find the info in cache already */
+ *hash = get_cached_dir(fs, representation);
+ if (*hash)
+ return SVN_NO_ERROR;
+
+ /* create the result container */
+ pool = get_cached_dir_pool(fs);
+ *hash = svn_hash__make(pool);
+
+ /* if this is a non-empty rep, read it and de-serialize the hash */
+ if (representation != NULL)
+ {
+ /* undeltify the directory text, then parse the svn hash format */
+ text_pool = svn_pool_create(scratch_pool);
+ SVN_ERR(get_combined_window(&text, fs, representation, text_pool));
+ stream = svn_stream_from_stringbuf(text, text_pool);
+ SVN_ERR(svn_hash_read2(*hash, stream, SVN_HASH_TERMINATOR, pool));
+ svn_pool_destroy(text_pool);
+ }
+
+ /* cache the result */
+ set_cached_dir(fs, representation, *hash);
+
+ return SVN_NO_ERROR;
+}
+
+/* Starting at the directory in REPRESENTATION in FILE_CONTENT, read all
+ * DAG nodes, directories and representations linked in that tree structure.
+ * Store them in FS and read them only once.
+ *
+ * Use POOL for persistent allocations and SCRATCH_POOL for temporaries.
+ */
+static svn_error_t *
+parse_dir(fs_fs_t *fs,
+          svn_stringbuf_t *file_content,
+          representation_t *representation,
+          apr_pool_t *pool,
+          apr_pool_t *scratch_pool)
+{
+  apr_hash_t *hash;
+  apr_hash_index_t *hi;
+  apr_pool_t *iter_pool;
+  apr_hash_t *base_dir;
+
+  /* special case: empty dir rep -> nothing to parse.
+   * Check this before creating the sub-pool and base hash; the previous
+   * code allocated both only to throw them away on this path. */
+  if (representation == NULL)
+    return SVN_NO_ERROR;
+
+  iter_pool = svn_pool_create(scratch_pool);
+  base_dir = svn_hash__make(scratch_pool);
+
+  /* if we have a previous representation of that dir, hash it by name */
+  if (representation->delta_base && representation->delta_base->dir)
+    {
+      apr_array_header_t *dir = representation->delta_base->dir->entries;
+      int i;
+
+      for (i = 0; i < dir->nelts; ++i)
+        {
+          direntry_t *entry = APR_ARRAY_IDX(dir, i, direntry_t *);
+          apr_hash_set(base_dir, entry->name, entry->name_len, entry);
+        }
+    }
+
+  /* read this directory */
+  SVN_ERR(read_dir(&hash, fs, representation, scratch_pool));
+
+  /* add it as an array to the representation (entries yet to be filled) */
+  representation->dir = apr_pcalloc(pool, sizeof(*representation->dir));
+  representation->dir->entries
+    = apr_array_make(pool, apr_hash_count(hash), sizeof(direntry_t *));
+
+  /* Translate the string dir entries into real entries.  Reuse existing
+   * objects as much as possible to keep memory consumption low.
+   */
+  for (hi = apr_hash_first(pool, hash); hi; hi = apr_hash_next(hi))
+    {
+      const char *name = svn__apr_hash_index_key(hi);
+      svn_string_t *str_val = svn__apr_hash_index_val(hi);
+      apr_size_t offset;
+      revision_info_t *revision_info;
+
+      /* look for a corresponding entry in the previous version.
+       * (BASE_DIR is always non-NULL here; the old NULL-check ternary
+       * was dead code) */
+      apr_size_t name_len = strlen(name);
+      direntry_t *entry = apr_hash_get(base_dir, name, name_len);
+
+      /* parse the new target revnode ID (revision, offset) */
+      SVN_ERR(parse_revnode_pos(&revision_info, &offset, fs, str_val));
+
+      /* if this is a new entry or if the content changed, create a new
+       * instance for it. */
+      if (   !entry
+          || !entry->node->text
+          || entry->node->text->revision != revision_info
+          || entry->node->original.offset != offset)
+        {
+          /* create & init the new entry.  Reuse the name string if
+           * possible */
+          direntry_t *new_entry = apr_pcalloc(pool, sizeof(*new_entry));
+          new_entry->name_len = name_len;
+          if (entry)
+            new_entry->name = entry->name;
+          else
+            new_entry->name = apr_pstrdup(pool, name);
+
+          /* Link it to the content noderev.  Recurse. */
+          entry = new_entry;
+          SVN_ERR(get_noderev(&entry->node, fs, file_content, offset,
+                              revision_info, pool, iter_pool));
+        }
+
+      /* set the directory entry */
+      APR_ARRAY_PUSH(representation->dir->entries, direntry_t *) = entry;
+      svn_pool_clear(iter_pool);
+    }
+
+  svn_pool_destroy(iter_pool);
+  return SVN_NO_ERROR;
+}
+
+/* Starting at the noderev at OFFSET in FILE_CONTENT, read all DAG nodes,
+ * directories and representations linked in that tree structure.  Store
+ * them in FS and read them only once.  Return the result in *NODEREV.
+ *
+ * Use POOL for persistent allocations and SCRATCH_POOL for temporaries.
+ */
+static svn_error_t *
+read_noderev(noderev_t **noderev,
+             fs_fs_t *fs,
+             svn_stringbuf_t *file_content,
+             apr_size_t offset,
+             revision_info_t *revision_info,
+             apr_pool_t *pool,
+             apr_pool_t *scratch_pool)
+{
+  noderev_t *result = apr_pcalloc(pool, sizeof(*result));
+  svn_string_t *line;
+  svn_boolean_t is_dir = FALSE;
+
+  /* private sub-pool: all temporaries die at the end of this call */
+  scratch_pool = svn_pool_create(scratch_pool);
+
+  /* parse the noderev line-by-line until we find an empty line */
+  result->original.offset = offset;
+  while (1)
+    {
+      /* for this line, extract key and value.  Ignore invalid values */
+      svn_string_t key;
+      svn_string_t value;
+      char *sep;
+
+      /* OFFSET is relative to the revision; add the revision's own
+       * position within the (possibly packed) file */
+      const char *start = file_content->data + offset
+                        + revision_info->original.offset;
+      const char *end = strchr(start, '\n');
+
+      line = svn_string_ncreate(start, end - start, scratch_pool);
+      offset += end - start + 1;
+
+      /* empty line -> end of noderev data */
+      if (line->len == 0)
+        break;
+
+      /* lines have the form "<key>: <value>" */
+      sep = strchr(line->data, ':');
+      if (sep == NULL)
+        continue;
+
+      key.data = line->data;
+      key.len = sep - key.data;
+      *sep = 0;   /* NUL-terminate the key within our private line copy */
+
+      /* skip lines with no room for ": <value>" after the key */
+      if (key.len + 2 > line->len)
+        continue;
+
+      value.data = sep + 2;
+      value.len = line->len - (key.len + 2);
+
+      /* translate (key, value) into noderev elements */
+      if (key_matches(&key, "type"))
+        is_dir = strcmp(value.data, "dir") == 0;
+      else if (key_matches(&key, "pred"))
+        SVN_ERR(parse_pred(&result->predecessor, fs, &value));
+      else if (key_matches(&key, "text"))
+        SVN_ERR(parse_representation(&result->text, fs, file_content,
+                                     &value, revision_info,
+                                     pool, scratch_pool));
+      else if (key_matches(&key, "props"))
+        SVN_ERR(parse_representation(&result->props, fs, file_content,
+                                     &value, revision_info,
+                                     pool, scratch_pool));
+    }
+
+  /* link noderev to revision info; OFFSET now points past the noderev */
+  result->revision = revision_info;
+  result->original.size = offset - result->original.offset;
+
+  /* insert into the revision's node list, kept sorted by original offset
+   * (get_noderev relies on that ordering) */
+  svn_sort__array_insert(&result,
+                         revision_info->node_revs,
+                         svn_sort__bsearch_lower_bound(&offset,
+                                                       revision_info->node_revs,
+                                                       compare_noderev_offsets));
+
+  /* if this is a directory, read and process that recursively */
+  if (is_dir)
+    SVN_ERR(parse_dir(fs, file_content, result->text,
+                      pool, scratch_pool));
+
+  /* done */
+  svn_pool_destroy(scratch_pool);
+  *noderev = result;
+
+  return SVN_NO_ERROR;
+}
+
+/* Print REVISION right-aligned in an 8-character field and flush stdout
+ * so the number becomes visible immediately.
+ */
+static void
+print_progress(svn_revnum_t revision)
+{
+  fprintf(stdout, "%8ld", revision);
+  fflush(stdout);
+}
+
+/* Read the content of the pack file starting at revision BASE and store it
+ * in FS.  Use POOL for allocations.
+ */
+static svn_error_t *
+read_pack_file(fs_fs_t *fs,
+               svn_revnum_t base,
+               apr_pool_t *pool)
+{
+  apr_array_header_t *manifest = NULL;
+  apr_pool_t *local_pool = svn_pool_create(pool);
+  apr_pool_t *iter_pool = svn_pool_create(local_pool);
+  int i;
+  svn_stringbuf_t *file_content;
+  revision_pack_t *revisions;
+  const char *pack_folder = get_pack_folder(fs, base, local_pool);
+
+  /* read the whole pack file into memory */
+  SVN_ERR(read_rev_or_pack_file(&file_content, fs, base, local_pool));
+
+  /* create the revision container */
+  revisions = apr_pcalloc(pool, sizeof(*revisions));
+  revisions->base = base;
+  revisions->fragments = NULL;
+  revisions->info = apr_array_make(pool,
+                                   fs->max_files_per_dir,
+                                   sizeof(revision_info_t*));
+  revisions->filesize = file_content->len;
+  APR_ARRAY_PUSH(fs->packs, revision_pack_t*) = revisions;
+
+  /* parse the manifest file; a complete pack must have exactly one
+   * entry per revision in the shard */
+  SVN_ERR(read_manifest(&manifest, fs, pack_folder, local_pool));
+  if (manifest->nelts != fs->max_files_per_dir)
+    return svn_error_create(SVN_ERR_FS_CORRUPT, NULL, NULL);
+
+  /* process each revision in the pack file */
+  for (i = 0; i < manifest->nelts; ++i)
+    {
+      apr_size_t root_node_offset;
+      svn_string_t rev_content;
+
+      /* create the revision info for the current rev.  The arrays start
+       * out in ITER_POOL and are copied to POOL once their final size
+       * is known (see apr_array_copy below). */
+      revision_info_t *info = apr_pcalloc(pool, sizeof(*info));
+      info->node_revs = apr_array_make(iter_pool, 4, sizeof(noderev_t*));
+      info->representations = apr_array_make(iter_pool, 4, sizeof(representation_t*));
+
+      /* manifest entry I is the revision's offset; the next entry (or
+       * the file size, for the last rev) bounds it */
+      info->revision = base + i;
+      info->original.offset = APR_ARRAY_IDX(manifest, i, apr_size_t);
+      info->original.end = i+1 < manifest->nelts
+                         ? APR_ARRAY_IDX(manifest, i+1 , apr_size_t)
+                         : file_content->len;
+      SVN_ERR(read_revision_header(&info->original.changes,
+                                   &info->original.changes_len,
+                                   &root_node_offset,
+                                   file_content,
+                                   APR_ARRAY_IDX(manifest, i , apr_size_t),
+                                   info->original.end,
+                                   iter_pool));
+
+      /* put it into our containers */
+      APR_ARRAY_PUSH(revisions->info, revision_info_t*) = info;
+      APR_ARRAY_PUSH(fs->revisions, revision_info_t*) = info;
+
+      /* cache the revision content */
+      rev_content.data = file_content->data + info->original.offset;
+      rev_content.len = info->original.end - info->original.offset;
+      set_cached_content(fs->cache, info->revision, &rev_content);
+
+      /* parse the revision content recursively. */
+      SVN_ERR(read_noderev(&info->root_noderev, fs, file_content,
+                           root_node_offset, info, pool, iter_pool));
+
+      /* copy dynamically grown containers from temp into result pool */
+      info->node_revs = apr_array_copy(pool, info->node_revs);
+      info->representations = apr_array_copy(pool, info->representations);
+
+      /* destroy temps */
+      svn_pool_clear(iter_pool);
+    }
+
+  /* one more pack file processed */
+  print_progress(base);
+  svn_pool_destroy(local_pool);
+
+  return SVN_NO_ERROR;
+}
+
+/* Read the content of the REVISION file and store it in FS.
+ * Use POOL for allocations.
+ */
+static svn_error_t *
+read_revision_file(fs_fs_t *fs,
+                   svn_revnum_t revision,
+                   apr_pool_t *pool)
+{
+  apr_size_t root_node_offset;
+  apr_pool_t *local_pool = svn_pool_create(pool);
+  svn_stringbuf_t *file_content;
+  svn_string_t rev_content;
+  revision_pack_t *revisions = apr_pcalloc(pool, sizeof(*revisions));
+  revision_info_t *info = apr_pcalloc(pool, sizeof(*info));
+
+  /* read the whole revision file into memory */
+  SVN_ERR(read_rev_or_pack_file(&file_content, fs, revision, local_pool));
+
+  /* create the revision info for the current rev */
+  info->node_revs = apr_array_make(pool, 4, sizeof(noderev_t*));
+  info->representations = apr_array_make(pool, 4, sizeof(representation_t*));
+
+  /* a single-revision file spans the whole file content */
+  info->revision = revision;
+  info->original.offset = 0;
+  info->original.end = file_content->len;
+  SVN_ERR(read_revision_header(&info->original.changes,
+                               &info->original.changes_len,
+                               &root_node_offset,
+                               file_content,
+                               0,
+                               info->original.end,
+                               local_pool));
+
+  /* put it into our containers */
+  APR_ARRAY_PUSH(fs->revisions, revision_info_t*) = info;
+
+  /* create a pseudo-pack file container for just this rev to keep our
+   * data structures as uniform as possible.
+   */
+  revisions->base = revision;
+  revisions->fragments = NULL;
+  revisions->info = apr_array_make(pool, 1, sizeof(revision_info_t*));
+  revisions->filesize = file_content->len;
+  APR_ARRAY_PUSH(revisions->info, revision_info_t*) = info;
+  APR_ARRAY_PUSH(fs->packs, revision_pack_t*) = revisions;
+
+  /* cache the revision content */
+  rev_content.data = file_content->data + info->original.offset;
+  rev_content.len = info->original.end - info->original.offset;
+  set_cached_content(fs->cache, info->revision, &rev_content);
+
+  /* parse the revision content recursively. */
+  SVN_ERR(read_noderev(&info->root_noderev, fs, file_content,
+                       root_node_offset, info,
+                       pool, local_pool));
+  /* NOTE(review): read_noderev already inserts the root noderev into
+   * INFO->NODE_REVS; this push appears to add it a second time (the
+   * pack-file path does not do this) -- confirm it is intentional. */
+  APR_ARRAY_PUSH(info->node_revs, noderev_t*) = info->root_noderev;
+
+  /* show progress every 1000 revs or so (i.e. once per shard) */
+  if (revision % fs->max_files_per_dir == 0)
+    print_progress(revision);
+
+  svn_pool_destroy(local_pool);
+
+  return SVN_NO_ERROR;
+}
+
+/* Read the repository at PATH beginning with revision START_REVISION and
+ * return the result in *FS.  Allocate caches with MEMSIZE bytes total
+ * capacity.  Use POOL for non-cache allocations.
+ */
+static svn_error_t *
+read_revisions(fs_fs_t **fs,
+               const char *path,
+               svn_revnum_t start_revision,
+               apr_size_t memsize,
+               apr_pool_t *pool)
+{
+  svn_revnum_t revision;
+  apr_size_t content_cache_size;
+  apr_size_t window_cache_size;
+  apr_size_t dir_cache_size;
+
+  /* determine cache sizes.
+   * NOTE(review): the arithmetic below scales MEMSIZE by 1024*1024, i.e.
+   * it treats MEMSIZE as megabytes, while the docstring above says
+   * "bytes" -- confirm against the caller. */
+  if (memsize < 100)
+    memsize = 100;
+
+  /* ~70% for revision content (capped), 20% for delta windows,
+   * 10% for directories (units per create_dir_cache -- confirm) */
+  content_cache_size = memsize * 7 / 10 > 4000 ? 4000 : memsize * 7 / 10;
+  window_cache_size = memsize * 2 / 10 * 1024 * 1024;
+  dir_cache_size = (memsize / 10) * 16000;
+
+  /* read repo format and such */
+  SVN_ERR(fs_open(fs, path, pool));
+
+  /* create data containers and caches.  Processing starts at the
+   * beginning of the shard containing START_REVISION. */
+  (*fs)->start_revision = start_revision
+                        - (start_revision % (*fs)->max_files_per_dir);
+  (*fs)->revisions = apr_array_make(pool,
+                                    (*fs)->max_revision + 1 - (*fs)->start_revision,
+                                    sizeof(revision_info_t *));
+  (*fs)->packs = apr_array_make(pool,
+                                ((*fs)->min_unpacked_rev - (*fs)->start_revision)
+                                    / (*fs)->max_files_per_dir,
+                                sizeof(revision_pack_t *));
+  (*fs)->null_base = apr_pcalloc(pool, sizeof(*(*fs)->null_base));
+
+  /* each cache lives in its own allocator / root pool */
+  (*fs)->cache = create_content_cache
+                    (apr_allocator_owner_get
+                        (svn_pool_create_allocator(FALSE)),
+                     content_cache_size * 1024 * 1024);
+  (*fs)->dir_cache = create_dir_cache
+                    (apr_allocator_owner_get
+                        (svn_pool_create_allocator(FALSE)),
+                     dir_cache_size);
+  (*fs)->window_cache = create_window_cache
+                    (apr_allocator_owner_get
+                        (svn_pool_create_allocator(FALSE)),
+                     10000, window_cache_size);
+
+  /* read all packed revs, one shard at a time */
+  for ( revision = start_revision
+      ; revision < (*fs)->min_unpacked_rev
+      ; revision += (*fs)->max_files_per_dir)
+    SVN_ERR(read_pack_file(*fs, revision, pool));
+
+  /* read non-packed revs */
+  for ( ; revision <= (*fs)->max_revision; ++revision)
+    SVN_ERR(read_revision_file(*fs, revision, pool));
+
+  return SVN_NO_ERROR;
+}
+
+/* Return the number of decimal digits needed to represent any offset in
+ * the given PACK file, including generous room for future growth.
+ */
+static apr_size_t
+get_max_offset_len(const revision_pack_t *pack)
+{
+  /* Assume the pack file may roughly double in size; add a flat margin
+   * on top to be on the safe side. */
+  apr_size_t remainder = pack->filesize * 2 + 10000;
+  apr_size_t digits;
+
+  /* count decimal digits of the fudged maximum */
+  for (digits = 0; remainder > 0; remainder /= 10)
+    ++digits;
+
+  return digits;
+}
+
+/* Create the fragments container in PACK and add revision header fragments
+ * to it.  Use POOL for allocations.
+ */
+static svn_error_t *
+add_revisions_pack_heads(revision_pack_t *pack,
+                         apr_pool_t *pool)
+{
+  int i;
+  revision_info_t *info;
+  apr_size_t offset_len = get_max_offset_len(pack);
+  fragment_t fragment;
+
+  /* allocate fragment arrays */
+
+  /* upper bound: header + changes fragment per revision plus one
+   * fragment per noderev and representation, plus the final header */
+  int fragment_count = 1;
+  for (i = 0; i < pack->info->nelts; ++i)
+    {
+      info = APR_ARRAY_IDX(pack->info, i, revision_info_t*);
+      fragment_count += info->node_revs->nelts
+                      + info->representations->nelts
+                      + 2;
+    }
+
+  /* NOTE(review): multi-rev packs start placing content at offset 64,
+   * presumably reserved space at the start of the pack -- confirm. */
+  pack->target_offset = pack->info->nelts > 1 ? 64 : 0;
+  pack->fragments = apr_array_make(pool,
+                                   fragment_count,
+                                   sizeof(fragment_t));
+
+  /* put revision headers first (all but the last revision's header,
+   * which goes at the end of the pack -- see add_revisions_pack_tail) */
+
+  for (i = 0; i < pack->info->nelts - 1; ++i)
+    {
+      info = APR_ARRAY_IDX(pack->info, i, revision_info_t*);
+      info->target.offset = pack->target_offset;
+
+      fragment.data = info;
+      fragment.kind = header_fragment;
+      fragment.position = pack->target_offset;
+      APR_ARRAY_PUSH(pack->fragments, fragment_t) = fragment;
+
+      /* slot size: two offsets of OFFSET_LEN digits plus 3 separators */
+      pack->target_offset += 2 * offset_len + 3;
+    }
+
+  /* only fix up the last revision's start offset here */
+  info = APR_ARRAY_IDX(pack->info, pack->info->nelts - 1, revision_info_t*);
+  info->target.offset = pack->target_offset;
+
+  /* followed by the changes list */
+
+  for (i = 0; i < pack->info->nelts; ++i)
+    {
+      info = APR_ARRAY_IDX(pack->info, i, revision_info_t*);
+
+      /* changes offsets are stored relative to the revision start */
+      info->target.changes = pack->target_offset - info->target.offset;
+      info->target.changes_len = info->original.changes_len;
+
+      fragment.data = info;
+      fragment.kind = changes_fragment;
+      fragment.position = pack->target_offset;
+      APR_ARRAY_PUSH(pack->fragments, fragment_t) = fragment;
+
+      pack->target_offset += info->original.changes_len;
+    }
+
+  return SVN_NO_ERROR;
+}
+
+/* For the revision described by INFO in FS, return the owning pack's
+ * fragment container in *FRAGMENTS and a pointer to that pack's current
+ * placement offset in *CURRENT_POS.
+ */
+static svn_error_t *
+get_target_offset(apr_size_t **current_pos,
+                  apr_array_header_t **fragments,
+                  fs_fs_t *fs,
+                  revision_info_t *info)
+{
+  revision_pack_t *pack;
+  svn_revnum_t revision = info->revision;
+  int idx;
+
+  /* packed shards share one pack object each; unpacked revisions follow
+   * with one pseudo-pack per revision */
+  if (revision < fs->min_unpacked_rev)
+    idx = (revision - fs->start_revision) / fs->max_files_per_dir;
+  else
+    idx = (fs->min_unpacked_rev - fs->start_revision) / fs->max_files_per_dir
+        + (revision - fs->min_unpacked_rev);
+
+  /* hand out the pack's fragment list and placement cursor */
+  pack = APR_ARRAY_IDX(fs->packs, idx, revision_pack_t*);
+  *current_pos = &pack->target_offset;
+  *fragments = pack->fragments;
+
+  return SVN_NO_ERROR;
+}
+
+/* forward declaration */
+static svn_error_t *
+add_noderev_recursively(fs_fs_t *fs,
+                        noderev_t *node,
+                        apr_pool_t *pool);
+
+/* Place fragments for the given REPRESENTATION of the given KIND, iff it
+ * has not been covered, yet.  Place the base reps along the deltification
+ * chain as far as those reps have not been covered, yet.  If REPRESENTATION
+ * is a directory, recursively place its elements.
+ *
+ * Use POOL for allocations.
+ */
+static svn_error_t *
+add_representation_recursively(fs_fs_t *fs,
+                               representation_t *representation,
+                               enum fragment_kind_t kind,
+                               apr_pool_t *pool)
+{
+  apr_size_t *current_pos;
+  apr_array_header_t *fragments;
+  fragment_t fragment;
+
+  /* place REPRESENTATION only once and only if it exists and will not
+   * be covered later as a directory. */
+  if (   representation == NULL
+      || representation->covered
+      || (representation->dir && kind != dir_fragment)
+      || representation == fs->null_base)
+    return SVN_NO_ERROR;
+
+  /* add and place a fragment for REPRESENTATION */
+  SVN_ERR(get_target_offset(&current_pos, &fragments,
+                            fs, representation->revision));
+  representation->target.offset = *current_pos;
+  representation->covered = TRUE;
+
+  fragment.data = representation;
+  fragment.kind = kind;
+  fragment.position = *current_pos;
+  APR_ARRAY_PUSH(fragments, fragment_t) = fragment;
+
+  /* determine the size of data to be added to the target file.
+   * The added constants below are headroom estimates; slots get grown
+   * later if they prove too small (see pack_revisions). */
+  if (   kind != dir_fragment
+      && representation->delta_base && representation->delta_base->dir)
+    {
+      /* base rep is a dir -> would change -> need to store it as fulltext
+       * in our target file */
+      apr_pool_t *text_pool = svn_pool_create(pool);
+      svn_stringbuf_t *content;
+
+      SVN_ERR(get_combined_window(&content, fs, representation, text_pool));
+      representation->target.size = content->len;
+      *current_pos += representation->target.size + 13;
+
+      svn_pool_destroy(text_pool);
+    }
+  else
+    if (   kind == dir_fragment
+        || (representation->delta_base && representation->delta_base->dir))
+      {
+        /* deltified directories may grow considerably */
+        if (representation->original.size < 50)
+          *current_pos += 300;
+        else
+          *current_pos += representation->original.size * 3 + 150;
+      }
+    else
+      {
+        /* plain / deltified content will not change but the header may
+         * grow slightly due to larger offsets. */
+        representation->target.size = representation->original.size;
+
+        if (representation->delta_base &&
+            (representation->delta_base != fs->null_base))
+          *current_pos += representation->original.size + 50;
+        else
+          *current_pos += representation->original.size + 13;
+      }
+
+  /* follow the delta chain and place base revs immediately after this */
+  if (representation->delta_base)
+    SVN_ERR(add_representation_recursively(fs,
+                                           representation->delta_base,
+                                           kind,
+                                           pool));
+
+  /* finally, recurse into directories */
+  if (representation->dir)
+    {
+      int i;
+      apr_array_header_t *entries = representation->dir->entries;
+
+      for (i = 0; i < entries->nelts; ++i)
+        {
+          direntry_t *entry = APR_ARRAY_IDX(entries, i, direntry_t *);
+          if (entry->node)
+            SVN_ERR(add_noderev_recursively(fs, entry->node, pool));
+        }
+    }
+
+  return SVN_NO_ERROR;
+}
+
+/* Place a fragment for NODE in FS unless it has already been covered.
+ * The node's representations (text, props) are placed right after the
+ * node itself.
+ *
+ * Use POOL for allocations.
+ */
+static svn_error_t *
+add_noderev_recursively(fs_fs_t *fs,
+                        noderev_t *node,
+                        apr_pool_t *pool)
+{
+  apr_size_t *pos;
+  apr_array_header_t *fragments;
+  fragment_t fragment;
+
+  /* every node gets placed exactly once */
+  if (node->covered)
+    return SVN_NO_ERROR;
+
+  /* reserve a slot for NODE at the pack's current placement position */
+  SVN_ERR(get_target_offset(&pos, &fragments, fs, node->revision));
+  node->covered = TRUE;
+  node->target.offset = *pos;
+
+  fragment.data = node;
+  fragment.kind = noderev_fragment;
+  fragment.position = *pos;
+  APR_ARRAY_PUSH(fragments, fragment_t) = fragment;
+
+  /* allow for some growth (longer offsets etc.) */
+  *pos += node->original.size + 40;
+
+  /* text rep: directories become dir fragments, everything else file
+   * fragments.  A NULL rep is ignored by the callee. */
+  SVN_ERR(add_representation_recursively(fs,
+                                         node->text,
+                                         node->text && node->text->dir
+                                           ? dir_fragment
+                                           : file_fragment,
+                                         pool));
+
+  /* property rep, if any */
+  SVN_ERR(add_representation_recursively(fs, node->props, property_fragment,
+                                         pool));
+
+  return SVN_NO_ERROR;
+}
+
+/* Place a fragment for the last revision in PACK.  Use POOL for allocations.
+ */
+static svn_error_t *
+add_revisions_pack_tail(revision_pack_t *pack,
+                        apr_pool_t *pool)
+{
+  int i;
+  revision_info_t *info;
+  apr_size_t offset_len = get_max_offset_len(pack);
+  fragment_t fragment;
+
+  /* put final revision header last and fix up revision lengths */
+
+  info = APR_ARRAY_IDX(pack->info, pack->info->nelts-1, revision_info_t*);
+
+  fragment.data = info;
+  fragment.kind = header_fragment;
+  fragment.position = pack->target_offset;
+  APR_ARRAY_PUSH(pack->fragments, fragment_t) = fragment;
+
+  /* same slot size as the other headers (see add_revisions_pack_heads) */
+  pack->target_offset += 2 * offset_len + 3;
+
+  /* end of target file reached.  Store that info in all revs. */
+  for (i = 0; i < pack->info->nelts; ++i)
+    {
+      info = APR_ARRAY_IDX(pack->info, i, revision_info_t*);
+      info->target.end = pack->target_offset;
+    }
+
+  return SVN_NO_ERROR;
+}
+
+/* Place all fragments for all revisions / packs in FS.
+ * Use POOL for allocations.
+ */
+static svn_error_t *
+reorder_revisions(fs_fs_t *fs,
+                  apr_pool_t *pool)
+{
+  int i, k;
+
+  /* headers and changes lists go first in every pack */
+
+  for (i = 0; i < fs->packs->nelts; ++i)
+    {
+      revision_pack_t *pack = APR_ARRAY_IDX(fs->packs, i, revision_pack_t*);
+      SVN_ERR(add_revisions_pack_heads(pack, pool));
+    }
+
+  /* representations & nodes.
+   * NOTE(review): revisions and nodes are walked newest-first here,
+   * presumably so that delta bases (which live in older revisions) get
+   * placed right after their dependents -- confirm. */
+
+  for (i = fs->revisions->nelts-1; i >= 0; --i)
+    {
+      revision_info_t *info = APR_ARRAY_IDX(fs->revisions, i, revision_info_t*);
+      for (k = info->node_revs->nelts - 1; k >= 0; --k)
+        {
+          noderev_t *node = APR_ARRAY_IDX(info->node_revs, k, noderev_t*);
+          SVN_ERR(add_noderev_recursively(fs, node, pool));
+        }
+
+      /* progress output once per shard */
+      if (info->revision % fs->max_files_per_dir == 0)
+        print_progress(info->revision);
+    }
+
+  /* pack file tails */
+
+  for (i = 0; i < fs->packs->nelts; ++i)
+    {
+      revision_pack_t *pack = APR_ARRAY_IDX(fs->packs, i, revision_pack_t*);
+      SVN_ERR(add_revisions_pack_tail(pack, pool));
+    }
+
+  return SVN_NO_ERROR;
+}
+
+/* forward declaration */
+static svn_error_t *
+get_fragment_content(svn_string_t **content,
+                     fs_fs_t *fs,
+                     fragment_t *fragment,
+                     apr_pool_t *pool);
+
+/* Directory content may change and with it, the deltified representations
+ * may change significantly.  This function causes all directory target reps
+ * in PACK of FS to be built and their new MD5 as well as rep sizes be
+ * updated.  We must do that before attempting to write noderevs.
+ *
+ * Use POOL for allocations.
+ */
+static svn_error_t *
+update_noderevs(fs_fs_t *fs,
+                revision_pack_t *pack,
+                apr_pool_t *pool)
+{
+  int i;
+  apr_pool_t *itempool = svn_pool_create(pool);
+
+  for (i = 0; i < pack->fragments->nelts; ++i)
+    {
+      fragment_t *fragment = &APR_ARRAY_IDX(pack->fragments, i, fragment_t);
+      if (fragment->kind == dir_fragment)
+        {
+          svn_string_t *content;
+
+          /* request updated rep content but ignore the result.
+           * We are only interested in the MD5, content and rep size
+           * updates performed as side effects of that call. */
+          SVN_ERR(get_fragment_content(&content, fs, fragment, itempool));
+          svn_pool_clear(itempool);
+        }
+    }
+
+  svn_pool_destroy(itempool);
+
+  return SVN_NO_ERROR;
+}
+
+/* Compute the target size of FRAGMENT in FS and return it in *LENGTH.
+ * If ADD_PADDING is set, add a small slack to account for offsets that
+ * may become longer.  Use POOL for temporary allocations.
+ */
+static svn_error_t *
+get_content_length(apr_size_t *length,
+                   fs_fs_t *fs,
+                   fragment_t *fragment,
+                   svn_boolean_t add_padding,
+                   apr_pool_t *pool)
+{
+  svn_string_t *content;
+
+  SVN_ERR(get_fragment_content(&content, fs, fragment, pool));
+
+  /* start with the exact size, then apply the requested fudge factor */
+  *length = content->len;
+  if (add_padding)
+    {
+      if (fragment->kind == dir_fragment)
+        *length += 16;          /* directories are the most likely to grow */
+      else if (fragment->kind == noderev_fragment)
+        *length += 3;           /* noderevs grow by a few digits at most */
+      /* all other kinds keep their exact size */
+    }
+
+  return SVN_NO_ERROR;
+}
+
+/* Move FRAGMENT to the global file offset NEW_POSITION and update the
+ * target location stored in the object the fragment refers to.
+ */
+static void
+move_fragment(fragment_t *fragment,
+              apr_size_t new_position)
+{
+  /* relocate the fragment itself */
+  fragment->position = new_position;
+
+  /* mirror the move into the underlying object's target info */
+  switch (fragment->kind)
+    {
+      case header_fragment:
+        ((revision_info_t *)fragment->data)->target.offset = new_position;
+        break;
+
+      case changes_fragment:
+        {
+          /* changes offsets are kept relative to the revision start */
+          revision_info_t *info = fragment->data;
+          info->target.changes = new_position - info->target.offset;
+          break;
+        }
+
+      case property_fragment:
+      case file_fragment:
+      case dir_fragment:
+        ((representation_t *)fragment->data)->target.offset = new_position;
+        break;
+
+      case noderev_fragment:
+        ((noderev_t *)fragment->data)->target.offset = new_position;
+        break;
+    }
+}
+
+/* Move the fragments in PACK's target fragment list to their final offsets.
+ * This may require several iterations if the fudge factors turned out to
+ * be insufficient.  Use POOL for allocations.
+ */
+static svn_error_t *
+pack_revisions(fs_fs_t *fs,
+               revision_pack_t *pack,
+               apr_pool_t *pool)
+{
+  int i;
+  fragment_t *fragment, *next;
+  svn_boolean_t needed_to_expand;
+  revision_info_t *info;
+  apr_size_t current_pos, len, old_len;
+
+  apr_pool_t *itempool = svn_pool_create(pool);
+
+  /* update all directory reps.  Chances are that most of the target rep
+   * sizes are now close to accurate. */
+  SVN_ERR(update_noderevs(fs, pack, pool));
+
+  /* compression phase: pack all fragments tightly with only a very small
+   * fudge factor.  This should cause offsets to shrink, thus all the
+   * actual fragment rate should tend to be even smaller afterwards. */
+  current_pos = pack->info->nelts > 1 ? 64 : 0;
+  for (i = 0; i + 1 < pack->fragments->nelts; ++i)
+    {
+      fragment = &APR_ARRAY_IDX(pack->fragments, i, fragment_t);
+      SVN_ERR(get_content_length(&len, fs, fragment, TRUE, itempool));
+      move_fragment(fragment, current_pos);
+      current_pos += len;
+
+      svn_pool_clear(itempool);
+    }
+
+  /* don't forget the final fragment (last revision's revision header) */
+  fragment = &APR_ARRAY_IDX(pack->fragments, pack->fragments->nelts-1, fragment_t);
+  fragment->position = current_pos;
+
+  /* expansion phase: check whether all fragments fit into their allotted
+   * slots.  Grow them geometrically if they don't fit.  Retry until they
+   * all do fit.
+   * Note: there is an upper limit to which fragments can grow.  So, this
+   * loop will terminate.  Often, no expansion will be necessary at all. */
+  do
+    {
+      needed_to_expand = FALSE;
+      current_pos = pack->info->nelts > 1 ? 64 : 0;
+
+      for (i = 0; i + 1 < pack->fragments->nelts; ++i)
+        {
+          fragment = &APR_ARRAY_IDX(pack->fragments, i, fragment_t);
+          next = &APR_ARRAY_IDX(pack->fragments, i + 1, fragment_t);
+          old_len = next->position - fragment->position;
+
+          SVN_ERR(get_content_length(&len, fs, fragment, FALSE, itempool));
+
+          /* grow a too-small slot by ~10% plus a small constant */
+          if (len > old_len)
+            {
+              len = (apr_size_t)(len * 1.1) + 10;
+              needed_to_expand = TRUE;
+            }
+          else
+            len = old_len;
+
+          /* NOTE(review): this compares a FRAGMENT index I against the
+           * revision count; it looks intended to fix up the last
+           * revision's header offset -- confirm the condition is right. */
+          if (i == pack->info->nelts - 1)
+            {
+              info = APR_ARRAY_IDX(pack->info, pack->info->nelts - 1, revision_info_t*);
+              info->target.offset = current_pos;
+            }
+
+          move_fragment(fragment, current_pos);
+          current_pos += len;
+
+          svn_pool_clear(itempool);
+        }
+
+      fragment = &APR_ARRAY_IDX(pack->fragments, pack->fragments->nelts-1, fragment_t);
+      fragment->position = current_pos;
+
+      /* update the revision
+       * sizes (they all end at the end of the pack file now) */
+      SVN_ERR(get_content_length(&len, fs, fragment, FALSE, itempool));
+      current_pos += len;
+
+      for (i = 0; i < pack->info->nelts; ++i)
+        {
+          info = APR_ARRAY_IDX(pack->info, i, revision_info_t*);
+          info->target.end = current_pos;
+        }
+    }
+  while (needed_to_expand);
+
+  svn_pool_destroy(itempool);
+
+  return SVN_NO_ERROR;
+}
+
+/* Write reorg'ed target content for PACK in FS.  Use POOL for allocations.
+ */
+static svn_error_t *
+write_revisions(fs_fs_t *fs,
+                revision_pack_t *pack,
+                apr_pool_t *pool)
+{
+  int i;
+  fragment_t *fragment = NULL;
+  svn_string_t *content;
+
+  apr_pool_t *itempool = svn_pool_create(pool);
+  apr_pool_t *iterpool = svn_pool_create(pool);
+
+  apr_file_t *file;
+  apr_size_t current_pos = 0;
+
+  /* grown on demand to the largest padding needed so far */
+  svn_stringbuf_t *null_buffer = svn_stringbuf_create_empty(iterpool);
+
+  /* create the target file under "<repo>/new":
+   * "<shard>.pack/pack" for pack files, "<shard>/<rev>" for single revs */
+  const char *dir = apr_psprintf(iterpool, "%s/new/%ld%s",
+                                 fs->path, pack->base / fs->max_files_per_dir,
+                                 pack->info->nelts > 1 ? ".pack" : "");
+  SVN_ERR(svn_io_make_dir_recursively(dir, pool));
+  SVN_ERR(svn_io_file_open(&file,
+                           pack->info->nelts > 1
+                             ? apr_psprintf(iterpool, "%s/pack", dir)
+                             : apr_psprintf(iterpool, "%s/%ld", dir, pack->base),
+                           APR_WRITE | APR_CREATE | APR_BUFFERED,
+                           APR_OS_DEFAULT,
+                           iterpool));
+
+  /* write all fragments */
+  for (i = 0; i < pack->fragments->nelts; ++i)
+    {
+      apr_size_t padding;
+
+      /* get fragment content to write */
+      fragment = &APR_ARRAY_IDX(pack->fragments, i, fragment_t);
+      SVN_ERR(get_fragment_content(&content, fs, fragment, itempool));
+      SVN_ERR_ASSERT(fragment->position >= current_pos);
+
+      /* number of bytes between this and the previous fragment */
+      if (   fragment->kind == header_fragment
+          && i+1 < pack->fragments->nelts)
+        /* special case: header fragments are aligned to the slot end */
+        padding = APR_ARRAY_IDX(pack->fragments, i+1, fragment_t).position -
+                  content->len - current_pos;
+      else
+        /* standard case: fragments are aligned to the slot start */
+        padding = fragment->position - current_pos;
+
+      /* write padding between fragments (NUL bytes) */
+      if (padding)
+        {
+          while (null_buffer->len < padding)
+            svn_stringbuf_appendbyte(null_buffer, 0);
+
+          SVN_ERR(svn_io_file_write_full(file,
+                                         null_buffer->data,
+                                         padding,
+                                         NULL,
+                                         itempool));
+          current_pos += padding;
+        }
+
+      /* write fragment content */
+      SVN_ERR(svn_io_file_write_full(file,
+                                     content->data,
+                                     content->len,
+                                     NULL,
+                                     itempool));
+      current_pos += content->len;
+
+      svn_pool_clear(itempool);
+    }
+
+  /* NOTE(review): the return value of apr_file_close() is ignored here,
+   * although every other I/O call in this function is wrapped in
+   * SVN_ERR() -- confirm whether close errors should be propagated. */
+  apr_file_close(file);
+
+  /* write new manifest file (pack files only) */
+  if (pack->info->nelts > 1)
+    {
+      svn_stream_t *stream;
+      SVN_ERR(svn_io_file_open(&file,
+                               apr_psprintf(iterpool, "%s/manifest", dir),
+                               APR_WRITE | APR_CREATE | APR_BUFFERED,
+                               APR_OS_DEFAULT,
+                               iterpool));
+      stream = svn_stream_from_aprfile2(file, FALSE, iterpool);
+
+      /* one line per revision: its target offset within the pack */
+      for (i = 0; i < pack->info->nelts; ++i)
+        {
+          revision_info_t *info = APR_ARRAY_IDX(pack->info, i,
+                                                revision_info_t *);
+          SVN_ERR(svn_stream_printf(stream, itempool,
+                                    "%" APR_SIZE_T_FMT "\n",
+                                    info->target.offset));
+          svn_pool_clear(itempool);
+        }
+
+      /* NOTE(review): the manifest stream / file is never explicitly
+       * closed; presumably the ITERPOOL cleanup below closes it --
+       * confirm. */
+    }
+
+  /* cleanup */
+  svn_pool_destroy(itempool);
+  svn_pool_destroy(iterpool);
+
+  return SVN_NO_ERROR;
+}
+
+/* Pack and write the reorganized target content for every pack /
+ * revision in FS.  Packing and writing happen in a single pass per pack
+ * file to keep the data accesses local.  Use POOL for allocations.
+ */
+static svn_error_t *
+pack_and_write_revisions(fs_fs_t *fs,
+                         apr_pool_t *pool)
+{
+  int idx;
+
+  /* all output goes below "<repo>/new" */
+  SVN_ERR(svn_io_make_dir_recursively(apr_psprintf(pool, "%s/new",
+                                                   fs->path),
+                                      pool));
+
+  for (idx = 0; idx < fs->packs->nelts; ++idx)
+    {
+      revision_pack_t *pack = APR_ARRAY_IDX(fs->packs, idx, revision_pack_t*);
+
+      /* progress output at shard boundaries */
+      if (pack->base % fs->max_files_per_dir == 0)
+        print_progress(pack->base);
+
+      /* finalize fragment placement, then write the result */
+      SVN_ERR(pack_revisions(fs, pack, pool));
+      SVN_ERR(write_revisions(fs, pack, pool));
+    }
+
+  return SVN_NO_ERROR;
+}
+
+/* For the directory REPRESENTATION in FS, construct the new (target)
+ * serialized plaintext representation and return it in *CONTENT.
+ * Allocate the result in POOL and temporaries in SCRATCH_POOL.
+ */
+static svn_error_t *
+get_updated_dir(svn_string_t **content,
+ fs_fs_t *fs,
+ representation_t *representation,
+ apr_pool_t *pool,
+ apr_pool_t *scratch_pool)
+{
+ apr_hash_t *hash;
+ apr_pool_t *hash_pool = svn_pool_create(scratch_pool);
+ apr_array_header_t *dir = representation->dir->entries;
+ int i;
+ svn_stream_t *stream;
+ svn_stringbuf_t *result;
+
+ /* get the original content */
+ SVN_ERR(read_dir(&hash, fs, representation, scratch_pool));
+ hash = apr_hash_copy(hash_pool, hash);
+
+ /* update all entries */
+ for (i = 0; i < dir->nelts; ++i)
+ {
+ char buffer[256];
+ svn_string_t *new_val;
+ apr_size_t pos;
+
+ /* find the original entry for for the current name */
+ direntry_t *entry = APR_ARRAY_IDX(dir, i, direntry_t *);
+ svn_string_t *str_val = apr_hash_get(hash, entry->name, entry->name_len);
+ if (str_val == NULL)
+ return svn_error_createf(SVN_ERR_FS_CORRUPT, NULL,
+ _("Dir entry '%s' not found"), entry->name);
+
+ SVN_ERR_ASSERT(str_val->len < sizeof(buffer));
+
+ /* create and updated node ID */
+ memcpy(buffer, str_val->data, str_val->len+1);
+ pos = strchr(buffer, '/') - buffer + 1;
+ pos += svn__ui64toa(buffer + pos, entry->node->target.offset - entry->node->revision->target.offset);
+ new_val = svn_string_ncreate(buffer, pos, hash_pool);
+
+ /* store it in the hash */
+ apr_hash_set(hash, entry->name, entry->name_len, new_val);
+ }
+
+ /* serialize the updated hash */
+ result = svn_stringbuf_create_ensure(representation->target.size, pool);
+ stream = svn_stream_from_stringbuf(result, hash_pool);
+ SVN_ERR(svn_hash_write2(hash, stream, SVN_HASH_TERMINATOR, hash_pool));
+ svn_pool_destroy(hash_pool);
+
+ /* done */
+ *content = svn_stringbuf__morph_into_string(result);
+
+ return SVN_NO_ERROR;
+}
+
+/* Calculate the delta representation for the given CONTENT and BASE.
+ * Return the rep in *DIFF. Use POOL for allocations.
+ */
+static svn_error_t *
+diff_stringbufs(svn_stringbuf_t *diff,
+ svn_string_t *base,
+ svn_string_t *content,
+ apr_pool_t *pool)
+{
+ svn_txdelta_window_handler_t diff_wh;
+ void *diff_whb;
+
+ svn_stream_t *stream;
+ svn_stream_t *source = svn_stream_from_string(base, pool);
+ svn_stream_t *target = svn_stream_from_stringbuf(diff, pool);
+
+ /* Prepare to write the svndiff data. */
+ svn_txdelta_to_svndiff3(&diff_wh,
+ &diff_whb,
+ target,
+ 1,
+ SVN_DELTA_COMPRESSION_LEVEL_DEFAULT,
+ pool);
+
+ /* create delta stream */
+ stream = svn_txdelta_target_push(diff_wh, diff_whb, source, pool);
+
+ /* run delta */
+ SVN_ERR(svn_stream_write(stream, content->data, &content->len));
+ SVN_ERR(svn_stream_close(stream));
+
+ return SVN_NO_ERROR;
+}
+
+/* Update the noderev id value for KEY in the textual noderev representation
+ * in NODE_REV. Take the new id from NODE. This is a no-op if the KEY
+ * cannot be found.
+ */
+static void
+update_id(svn_stringbuf_t *node_rev,
+ const char *key,
+ noderev_t *node)
+{
+ char *newline_pos = 0;
+ char *pos;
+
+ /* we need to update the offset only -> find its position */
+ pos = strstr(node_rev->data, key);
+ if (pos)
+ pos = strchr(pos, '/');
+ if (pos)
+ newline_pos = strchr(++pos, '\n');
+
+ if (pos && newline_pos)
+ {
+ /* offset data has been found -> replace it */
+ char temp[SVN_INT64_BUFFER_SIZE];
+ apr_size_t len = svn__i64toa(temp, node->target.offset - node->revision->target.offset);
+ svn_stringbuf_replace(node_rev,
+ pos - node_rev->data, newline_pos - pos,
+ temp, len);
+ }
+}
+
+/* Update the representation id value for KEY in the textual noderev
+ * representation in NODE_REV. Take the offset, sizes and new MD5 from
+ * REPRESENTATION. Use SCRATCH_POOL for allocations.
+ * This is a no-op if the KEY cannot be found.
+ */
+static void
+update_text(svn_stringbuf_t *node_rev,
+ const char *key,
+ representation_t *representation,
+ apr_pool_t *scratch_pool)
+{
+ apr_size_t key_len = strlen(key);
+ char *pos = strstr(node_rev->data, key);
+ char *val_pos;
+
+ if (!pos)
+ return;
+
+ val_pos = pos + key_len;
+ if (representation->dir)
+ {
+ /* for directories, we need to write all rep info anew */
+ char *newline_pos = strchr(val_pos, '\n');
+ svn_checksum_t checksum;
+ const char* temp = apr_psprintf(scratch_pool, "%ld %" APR_SIZE_T_FMT " %"
+ APR_SIZE_T_FMT" %" APR_SIZE_T_FMT " %s",
+ representation->revision->revision,
+ representation->target.offset - representation->revision->target.offset,
+ representation->target.size,
+ representation->dir->size,
+ svn_checksum_to_cstring(&checksum,
+ scratch_pool));
+
+ checksum.digest = representation->dir->target_md5;
+ checksum.kind = svn_checksum_md5;
+ svn_stringbuf_replace(node_rev,
+ val_pos - node_rev->data, newline_pos - val_pos,
+ temp, strlen(temp));
+ }
+ else
+ {
+ /* ordinary representation: replace offset and rep size only.
+ * Content size and checksums are unchanged. */
+ const char* temp;
+ char *end_pos = strchr(val_pos, ' ');
+
+ val_pos = end_pos + 1;
+ end_pos = strchr(strchr(val_pos, ' ') + 1, ' ');
+ temp = apr_psprintf(scratch_pool, "%" APR_SIZE_T_FMT " %" APR_SIZE_T_FMT,
+ representation->target.offset - representation->revision->target.offset,
+ representation->target.size);
+
+ svn_stringbuf_replace(node_rev,
+ val_pos - node_rev->data, end_pos - val_pos,
+ temp, strlen(temp));
+ }
+}
+
+/* Get the target content (data block as to be written to the file) for
+ * the given FRAGMENT in FS. Return the content in *CONTENT. Use POOL
+ * for allocations.
+ *
+ * Note that, as a side-effect, this will update the target rep. info for
+ * directories.
+ */
+static svn_error_t *
+get_fragment_content(svn_string_t **content,
+ fs_fs_t *fs,
+ fragment_t *fragment,
+ apr_pool_t *pool)
+{
+ revision_info_t *info;
+ representation_t *representation;
+ noderev_t *node;
+ svn_string_t *revision_content, *base_content;
+ svn_stringbuf_t *header, *node_rev, *text;
+ apr_size_t header_size;
+ svn_checksum_t *checksum = NULL;
+
+ switch (fragment->kind)
+ {
+ /* revision headers can be constructed from target position info */
+ case header_fragment:
+ info = fragment->data;
+ *content = svn_string_createf(pool,
+ "\n%" APR_SIZE_T_FMT " %" APR_SIZE_T_FMT "\n",
+ info->root_noderev->target.offset - info->target.offset,
+ info->target.changes);
+ return SVN_NO_ERROR;
+
+ /* The changes list remains untouched */
+ case changes_fragment:
+ info = fragment->data;
+ SVN_ERR(get_content(&revision_content, fs, info->revision, pool));
+
+ *content = svn_string_create_empty(pool);
+ (*content)->data = revision_content->data + info->original.changes;
+ (*content)->len = info->target.changes_len;
+ return SVN_NO_ERROR;
+
+ /* property and file reps get new headers any need to be rewritten,
+ * iff the base rep is a directory. The actual (deltified) content
+ * remains unchanged, though. MD5 etc. do not change. */
+ case property_fragment:
+ case file_fragment:
+ representation = fragment->data;
+ SVN_ERR(get_content(&revision_content, fs,
+ representation->revision->revision, pool));
+
+ if (representation->delta_base)
+ if (representation->delta_base->dir)
+ {
+ /* if the base happens to be a directory, reconstruct the
+ * full text and represent it as PLAIN rep. */
+ SVN_ERR(get_combined_window(&text, fs, representation, pool));
+ representation->target.size = text->len;
+
+ svn_stringbuf_insert(text, 0, "PLAIN\n", 6);
+ svn_stringbuf_appendcstr(text, "ENDREP\n");
+ *content = svn_stringbuf__morph_into_string(text);
+
+ return SVN_NO_ERROR;
+ }
+ else
+ /* construct a new rep header */
+ if (representation->delta_base == fs->null_base)
+ header = svn_stringbuf_create("DELTA\n", pool);
+ else
+ header = svn_stringbuf_createf(pool,
+ "DELTA %ld %" APR_SIZE_T_FMT " %" APR_SIZE_T_FMT "\n",
+ representation->delta_base->revision->revision,
+ representation->delta_base->target.offset
+ - representation->delta_base->revision->target.offset,
+ representation->delta_base->target.size);
+ else
+ header = svn_stringbuf_create("PLAIN\n", pool);
+
+ /* if it exists, the actual delta base is unchanged. Hence, this
+ * rep is unchanged even if it has been deltified. */
+ header_size = strchr(revision_content->data +
+ representation->original.offset, '\n') -
+ revision_content->data -
+ representation->original.offset + 1;
+ svn_stringbuf_appendbytes(header,
+ revision_content->data +
+ representation->original.offset +
+ header_size,
+ representation->original.size);
+ svn_stringbuf_appendcstr(header, "ENDREP\n");
+ *content = svn_stringbuf__morph_into_string(header);
+ return SVN_NO_ERROR;
+
+ /* directory reps need to be rewritten (and deltified) completely.
+ * As a side-effect, update the MD5 and target content size. */
+ case dir_fragment:
+ /* construct new content and update MD5 */
+ representation = fragment->data;
+ SVN_ERR(get_updated_dir(&revision_content, fs, representation,
+ pool, pool));
+ SVN_ERR(svn_checksum(&checksum, svn_checksum_md5,
+ revision_content->data, revision_content->len,
+ pool));
+ memcpy(representation->dir->target_md5,
+ checksum->digest,
+ sizeof(representation->dir->target_md5));
+
+ /* deltify against the base rep if necessary */
+ if (representation->delta_base)
+ {
+ if (representation->delta_base->dir == NULL)
+ {
+ /* dummy or non-dir base rep -> self-compress only */
+ header = svn_stringbuf_create("DELTA\n", pool);
+ base_content = svn_string_create_empty(pool);
+ }
+ else
+ {
+ /* deltify against base rep (which is a directory, too)*/
+ representation_t *base_rep = representation->delta_base;
+ header = svn_stringbuf_createf(pool,
+ "DELTA %ld %" APR_SIZE_T_FMT " %" APR_SIZE_T_FMT "\n",
+ base_rep->revision->revision,
+ base_rep->target.offset - base_rep->revision->target.offset,
+ base_rep->target.size);
+ SVN_ERR(get_updated_dir(&base_content, fs, base_rep,
+ pool, pool));
+ }
+
+ /* run deltification and update target content size */
+ header_size = header->len;
+ SVN_ERR(diff_stringbufs(header, base_content,
+ revision_content, pool));
+ representation->dir->size = revision_content->len;
+ representation->target.size = header->len - header_size;
+ svn_stringbuf_appendcstr(header, "ENDREP\n");
+ *content = svn_stringbuf__morph_into_string(header);
+ }
+ else
+ {
+ /* no delta base (not even a dummy) -> PLAIN rep */
+ representation->target.size = revision_content->len;
+ representation->dir->size = revision_content->len;
+ *content = svn_string_createf(pool, "PLAIN\n%sENDREP\n",
+ revision_content->data);
+ }
+
+ return SVN_NO_ERROR;
+
+ /* construct the new noderev content. No side-effects.*/
+ case noderev_fragment:
+ /* get the original noderev as string */
+ node = fragment->data;
+ SVN_ERR(get_content(&revision_content, fs,
+ node->revision->revision, pool));
+ node_rev = svn_stringbuf_ncreate(revision_content->data +
+ node->original.offset,
+ node->original.size,
+ pool);
+
+ /* update the values that may have hanged for target */
+ update_id(node_rev, "id: ", node);
+ update_id(node_rev, "pred: ", node->predecessor);
+ update_text(node_rev, "text: ", node->text, pool);
+ update_text(node_rev, "props: ", node->props, pool);
+
+ *content = svn_stringbuf__morph_into_string(node_rev);
+ return SVN_NO_ERROR;
+ }
+
+ SVN_ERR_ASSERT(0);
+
+ return SVN_NO_ERROR;
+}
+
+/* In the repository at PATH, restore the original content in case we ran
+ * this reorg tool before. Use POOL for allocations.
+ */
+static svn_error_t *
+prepare_repo(const char *path, apr_pool_t *pool)
+{
+ svn_node_kind_t kind;
+
+ const char *old_path = svn_dirent_join(path, "db/old", pool);
+ const char *new_path = svn_dirent_join(path, "new", pool);
+ const char *revs_path = svn_dirent_join(path, "db/revs", pool);
+ const char *old_rep_cache_path = svn_dirent_join(path, "db/rep-cache.db.old", pool);
+ const char *rep_cache_path = svn_dirent_join(path, "db/rep-cache.db", pool);
+
+ /* is there a backup? */
+ SVN_ERR(svn_io_check_path(old_path, &kind, pool));
+ if (kind == svn_node_dir)
+ {
+ /* yes, restore the org content from it */
+ SVN_ERR(svn_io_remove_dir2(new_path, TRUE, NULL, NULL, pool));
+ SVN_ERR(svn_io_file_move(revs_path, new_path, pool));
+ SVN_ERR(svn_io_file_move(old_path, revs_path, pool));
+ SVN_ERR(svn_io_remove_dir2(new_path, TRUE, NULL, NULL, pool));
+ }
+
+ /* same for the rep cache db */
+ SVN_ERR(svn_io_check_path(old_rep_cache_path, &kind, pool));
+ if (kind == svn_node_file)
+ SVN_ERR(svn_io_file_move(old_rep_cache_path, rep_cache_path, pool));
+
+ return SVN_NO_ERROR;
+}
+
+/* In the repository at PATH, create a backup of the orig content and
+ * replace it with the reorg'ed. Use POOL for allocations.
+ */
+static svn_error_t *
+activate_new_revs(const char *path, apr_pool_t *pool)
+{
+ svn_node_kind_t kind;
+
+ const char *old_path = svn_dirent_join(path, "db/old", pool);
+ const char *new_path = svn_dirent_join(path, "new", pool);
+ const char *revs_path = svn_dirent_join(path, "db/revs", pool);
+ const char *old_rep_cache_path = svn_dirent_join(path, "db/rep-cache.db.old", pool);
+ const char *rep_cache_path = svn_dirent_join(path, "db/rep-cache.db", pool);
+
+ /* if there is no backup, yet, move the current repo content to the backup
+ * and place it with the new (reorg'ed) data. */
+ SVN_ERR(svn_io_check_path(old_path, &kind, pool));
+ if (kind == svn_node_none)
+ {
+ SVN_ERR(svn_io_file_move(revs_path, old_path, pool));
+ SVN_ERR(svn_io_file_move(new_path, revs_path, pool));
+ }
+
+ /* same for the rep cache db */
+ SVN_ERR(svn_io_check_path(old_rep_cache_path, &kind, pool));
+ if (kind == svn_node_none)
+ SVN_ERR(svn_io_file_move(rep_cache_path, old_rep_cache_path, pool));
+
+ return SVN_NO_ERROR;
+}
+
+/* Write tool usage info text to OSTREAM using PROGNAME as a prefix and
+ * POOL for allocations.
+ */
+static void
+print_usage(svn_stream_t *ostream, const char *progname,
+ apr_pool_t *pool)
+{
+ svn_error_clear(svn_stream_printf(ostream, pool,
+ "\n"
+ "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! WARNING !!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
+ "!!! This is an experimental tool. Don't use it on production data !!!\n"
+ "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
+ "\n"
+ "Usage: %s <repo> <cachesize>\n"
+ "\n"
+ "Optimize the repository at local path <repo> staring from revision 0.\n"
+ "Use up to <cachesize> MB of memory for caching. This does not include\n"
+ "temporary representation of the repository structure, i.e. the actual\n"
+ "memory will be higher and <cachesize> be the lower limit.\n",
+ progname));
+}
+
+/* linear control flow */
+int main(int argc, const char *argv[])
+{
+ apr_pool_t *pool;
+ svn_stream_t *ostream;
+ svn_error_t *svn_err;
+ const char *repo_path = NULL;
+ svn_revnum_t start_revision = 0;
+ apr_size_t memsize = 0;
+ apr_uint64_t temp = 0;
+ fs_fs_t *fs;
+
+ apr_initialize();
+ atexit(apr_terminate);
+
+ pool = apr_allocator_owner_get(svn_pool_create_allocator(FALSE));
+
+ svn_err = svn_stream_for_stdout(&ostream, pool);
+ if (svn_err)
+ {
+ svn_handle_error2(svn_err, stdout, FALSE, ERROR_TAG);
+ return 2;
+ }
+
+ if (argc != 3)
+ {
+ print_usage(ostream, argv[0], pool);
+ return 2;
+ }
+
+ svn_err = svn_cstring_strtoui64(&temp, argv[2], 0, APR_SIZE_MAX, 10);
+ if (svn_err)
+ {
+ print_usage(ostream, argv[0], pool);
+ svn_error_clear(svn_err);
+ return 2;
+ }
+
+ memsize = (apr_size_t)temp;
+ repo_path = argv[1];
+ start_revision = 0;
+
+ printf("\nPreparing repository\n");
+ svn_err = prepare_repo(repo_path, pool);
+
+ if (!svn_err)
+ {
+ printf("Reading revisions\n");
+ svn_err = read_revisions(&fs, repo_path, start_revision, memsize, pool);
+ }
+
+ if (!svn_err)
+ {
+ printf("\nReordering revision content\n");
+ svn_err = reorder_revisions(fs, pool);
+ }
+
+ if (!svn_err)
+ {
+ printf("\nPacking and writing revisions\n");
+ svn_err = pack_and_write_revisions(fs, pool);
+ }
+
+ if (!svn_err)
+ {
+ printf("\nSwitch to new revs\n");
+ svn_err = activate_new_revs(repo_path, pool);
+ }
+
+ if (svn_err)
+ {
+ svn_handle_error2(svn_err, stdout, FALSE, ERROR_TAG);
+ return 2;
+ }
+
+ return 0;
+}
diff --git a/tools/dev/gcov.patch b/tools/dev/gcov.patch
deleted file mode 100644
index b90db97..0000000
--- a/tools/dev/gcov.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-This patch can be used to generate a report showing what C source
-lines are executed when the testsuite is run. gcc is required. After
-applying this patch do:
-
-$ ./configure --enable-gcov
-$ make check
-$ make gcov
-
-Now look at gcov-report.html and the annotated source files it links
-to.
-
-See also gcov(1), gcc(1).
-
-Index: Makefile.in
-===================================================================
---- Makefile.in (revision 32484)
-+++ Makefile.in (working copy)
-@@ -443,6 +443,36 @@
- @$(MAKE) check \
- BASE_URL=svn+ssh://localhost`pwd`/subversion/tests/cmdline
-
-+gcov:
-+ @echo -n "Collecting source files ..." ; \
-+ FILES=`find subversion/ -path '*/tests/*' -prune -o \
-+ -name '*.c' -print`; \
-+ echo "Done." ; \
-+ echo "Coverage report Subversion r`svnversion .`<br>" \
-+ echo "`date`<br>" \
-+ > gcov-report.html; \
-+ echo `uname -o -r -m` "<br>" \
-+ >> gcov-report.html; \
-+ (for file in $$FILES; do \
-+ echo $$file 1>&2 ; \
-+ base=`echo $$file | sed -e 's/.c$$//' `; \
-+ if [ -f "$$base.da" ] ; then \
-+ obj=$$base.o; \
-+ else \
-+ obj=`dirname $$base`/.libs/`basename $$base`.o; \
-+ fi; \
-+ stats=`gcov --preserve-paths \
-+ --object-directory=$$obj \
-+ $$file | sed -e "s/Creating.*//" | \
-+ sed -s "s|$$PWD/||"` \
-+ mangled=`echo $$base | tr '/' '#'`; \
-+ fixed=`echo $$base | tr '/' '_'`; \
-+ mv *$$mangled.c.gcov $$fixed.c.gcov; \
-+ echo -n $$stats | \
-+ sed -e "s/in file/in file <a href=\"$$fixed.c.gcov\">/"; \
-+ echo "</a><br>"; \
-+ done) | sort -g >> gcov-report.html
-+
- bdbcheck:
- @$(MAKE) check FS_TYPE=bdb
-
-Index: configure.ac
-===================================================================
---- configure.ac (revision 32484)
-+++ configure.ac (working copy)
-@@ -684,6 +684,14 @@
- # # do nothing
- fi
-
-+AC_ARG_ENABLE(gcov,
-+AC_HELP_STRING([--enable-gcov],
-+ [Turn on coverage testing (GCC only).]),
-+[
-+ if test "$enableval" = "yes" ; then
-+ CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage"
-+ fi
-+])
-
- AC_ARG_WITH(editor,
- AS_HELP_STRING([--with-editor=PATH],
diff --git a/tools/dev/gdb-py/README b/tools/dev/gdb-py/README
new file mode 100644
index 0000000..38133f1
--- /dev/null
+++ b/tools/dev/gdb-py/README
@@ -0,0 +1,29 @@
+This directory includes a Python module which will integrate with gdb which
+can be used to pretty-print various Subversion types. For additional
+information about gdb pretty-printing, see:
+
+ http://sourceware.org/gdb/onlinedocs/gdb/Pretty-Printing.html
+
+
+How to Use
+----------
+To enable pretty printing of selected Subversion types, put the following code
+in your ~/.gdbinit:
+
+[[[
+python
+import sys, os.path
+sys.path.insert(0, os.path.expanduser('~/dev/svn-trunk/tools/dev/gdb-py'))
+from svndbg.printers import register_libsvn_printers
+register_libsvn_printers(None)
+end
+]]]
+
+Change the path to point to the correct location on your platform for the
+gdb-py directory, and then load gdb. Everything should Just Work.
+(I believe this requires gdb >= 7.0, but earlier versions may also work.)
+
+The list of currently supported types for pretty printing is a bit lacking,
+so should you run into a type which could be useful to be pretty printed,
+read the documentation referenced above and follow the existing examples
+to extend the pretty-printing support. Enjoy!
diff --git a/tools/dev/gdb-py/svndbg/__init__.py b/tools/dev/gdb-py/svndbg/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/dev/gdb-py/svndbg/__init__.py
diff --git a/tools/dev/gdb-py/svndbg/printers.py b/tools/dev/gdb-py/svndbg/printers.py
new file mode 100644
index 0000000..da041b4
--- /dev/null
+++ b/tools/dev/gdb-py/svndbg/printers.py
@@ -0,0 +1,417 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+import gdb
+import re
+
+import gdb.printing
+from gdb.printing import RegexpCollectionPrettyPrinter
+
+
+class TypedefRegexCollectionPrettyPrinter(RegexpCollectionPrettyPrinter):
+ """Class for implementing a collection of pretty-printers, matching the
+ type name to a regular expression.
+
+ A pretty-printer in this collection will be used if the type of the
+ value to be printed matches the printer's regular expression, or if
+ the value is a pointer to and/or typedef to a type name that matches
+ its regular expression. The variations are tried in this order:
+
+ 1. the type name as known to the debugger (could be a 'typedef');
+ 2. the type after stripping off any number of layers of 'typedef';
+ 3. if it is a pointer, the pointed-to type;
+ 4. if it is a pointer, the pointed-to type minus some 'typedef's.
+
+ In all cases, ignore 'const' and 'volatile' qualifiers. When
+ matching the pointed-to type, dereference the value or use 'None' if
+ the value was a null pointer.
+
+ This class is modeled on RegexpCollectionPrettyPrinter, which (in GDB
+ 7.3) matches on the base type's tag name and can't match a pointer
+ type or any other type that doesn't have a tag name.
+ """
+
+ def __init__(self, name):
+ super(TypedefRegexCollectionPrettyPrinter, self).__init__(name)
+
+ def __call__(self, val):
+ """Find and return an instantiation of a printer for VAL.
+ """
+
+ def lookup_type(type, val):
+ """Return the first printer whose regular expression matches the
+ name (tag name for struct/union/enum types) of TYPE, ignoring
+ any 'const' or 'volatile' qualifiers.
+
+ VAL is a gdb.Value, or may be None to indicate a dereferenced
+ null pointer. TYPE is the associated gdb.Type.
+ """
+ if type.code in [gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION,
+ gdb.TYPE_CODE_ENUM]:
+ typename = type.tag
+ else:
+ typename = str(type.unqualified())
+ for printer in self.subprinters:
+ if printer.enabled and printer.compiled_re.search(typename):
+ return printer.gen_printer(val)
+
+ def lookup_type_or_alias(type, val):
+ """Return the first printer matching TYPE, or else if TYPE is a
+ typedef then the first printer matching the aliased type.
+
+ VAL is a gdb.Value, or may be None to indicate a dereferenced
+ null pointer. TYPE is the associated gdb.Type.
+ """
+ # First, look for a printer for the given (but unqualified) type.
+ printer = lookup_type(type, val)
+ if printer:
+ return printer
+
+ # If it's a typedef, look for a printer for the aliased type ...
+ while type.code == gdb.TYPE_CODE_TYPEDEF:
+ type = type.target()
+ printer = lookup_type(type, val)
+ if printer:
+ return printer
+
+ # First, look for a printer for the given (but unqualified) type, or
+ # its aliased type if it's a typedef.
+ printer = lookup_type_or_alias(val.type, val)
+ if printer:
+ return printer
+
+ # If it's a pointer, look for a printer for the pointed-to type.
+ if val.type.code == gdb.TYPE_CODE_PTR:
+ type = val.type.target()
+ printer = lookup_type_or_alias(
+ type, val and val.dereference() or None)
+ if printer:
+ return printer
+
+ # Cannot find a matching pretty printer in this collection.
+ return None
+
+class InferiorFunction:
+ """A class whose instances are callable functions on the inferior
+ process.
+ """
+ def __init__(self, function_name):
+ self.function_name = function_name
+ self.func = None
+
+ def __call__(self, *args):
+ if not self.func:
+ self.func = gdb.parse_and_eval(self.function_name)
+ return self.func(*args)
+
+def children_as_map(children_iterator):
+ """Convert an iteration of (key, value) pairs into the form required for
+ a pretty-printer 'children' method when the display-hint is 'map'.
+ """
+ for k, v in children_iterator:
+ yield 'key', k
+ yield 'val', v
+
+
+########################################################################
+
+# Pretty-printing for APR library types.
+
+# Some useful gdb.Type instances that can be initialized before any object
+# files are loaded.
+pvoidType = gdb.lookup_type('void').pointer()
+cstringType = gdb.lookup_type('char').pointer()
+
+# Some functions that resolve to calls into the inferior process.
+apr_hash_count = InferiorFunction('apr_hash_count')
+apr_hash_first = InferiorFunction('apr_hash_first')
+apr_hash_next = InferiorFunction('apr_hash_next')
+svn__apr_hash_index_key = InferiorFunction('svn__apr_hash_index_key')
+svn__apr_hash_index_val = InferiorFunction('svn__apr_hash_index_val')
+
+def children_of_apr_hash(hash_p, value_type=None):
+ """Iterate over an 'apr_hash_t *' GDB value, in the way required for a
+ pretty-printer 'children' method when the display-hint is 'map'.
+ Cast the value pointers to VALUE_TYPE, or return values as '...' if
+ VALUE_TYPE is None.
+ """
+ hi = apr_hash_first(0, hash_p)
+ while (hi):
+ k = svn__apr_hash_index_key(hi).reinterpret_cast(cstringType)
+ if value_type:
+ val = svn__apr_hash_index_val(hi).reinterpret_cast(value_type)
+ else:
+ val = '...'
+ try:
+ key = k.string()
+ except:
+ key = '<unreadable>'
+ yield key, val
+ hi = apr_hash_next(hi)
+
+class AprHashPrinter:
+ """for 'apr_hash_t' of 'char *' keys and unknown values"""
+ def __init__(self, val):
+ if val:
+ self.hash_p = val.address
+ else:
+ self.hash_p = val
+
+ def to_string(self):
+ """Return a string to be displayed before children are displayed, or
+ return None if we don't want any such.
+ """
+ if not self.hash_p:
+ return 'NULL'
+ return 'hash of ' + str(apr_hash_count(self.hash_p)) + ' items'
+
+ def children(self):
+ if not self.hash_p:
+ return []
+ return children_as_map(children_of_apr_hash(self.hash_p))
+
+ def display_hint(self):
+ return 'map'
+
+def children_of_apr_array(array, value_type):
+ """Iterate over an 'apr_array_header_t' GDB value, in the way required for
+ a pretty-printer 'children' method when the display-hint is 'array'.
+ Cast the values to VALUE_TYPE.
+ """
+ nelts = int(array['nelts'])
+ elts = array['elts'].reinterpret_cast(value_type.pointer())
+ for i in range(nelts):
+ yield str(i), elts[i]
+
+class AprArrayPrinter:
+ """for 'apr_array_header_t' of unknown elements"""
+ def __init__(self, val):
+ self.array = val
+
+ def to_string(self):
+ if not self.array:
+ return 'NULL'
+ nelts = self.array['nelts']
+ return 'array of ' + str(int(nelts)) + ' items'
+
+ def children(self):
+ # We can't display the children as we don't know their type.
+ return []
+
+ def display_hint(self):
+ return 'array'
+
+########################################################################
+
+# Pretty-printing for Subversion libsvn_subr types.
+
+class SvnBooleanPrinter:
+ """for svn_boolean_t"""
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ if self.val is None:
+ return '(NULL)'
+ if self.val:
+ return 'TRUE'
+ else:
+ return 'FALSE'
+
+class SvnStringPrinter:
+ """for svn_string_t"""
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ if not self.val:
+ return 'NULL'
+
+ data = self.val['data']
+ len = int(self.val['len'])
+ return data.string(length=len)
+
+ def display_hint(self):
+ if self.val:
+ return 'string'
+
+class SvnMergeRangePrinter:
+ """for svn_merge_range_t"""
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ if not self.val:
+ return 'NULL'
+
+ r = self.val
+ start = int(r['start'])
+ end = int(r['end'])
+ if start >= 0 and start < end:
+ if start + 1 == end:
+ rs = str(end)
+ else:
+ rs = str(start + 1) + '-' + str(end)
+ elif end >= 0 and end < start:
+ if start == end + 1:
+ rs = '-' + str(start)
+ else:
+ rs = str(start) + '-' + str(end + 1)
+ else:
+ rs = '(INVALID: s=%d, e=%d)' % (start, end)
+ if not r['inheritable']:
+ rs += '*'
+ return rs
+
+ def display_hint(self):
+ if self.val:
+ return 'string'
+
+class SvnRangelistPrinter:
+ """for svn_rangelist_t"""
+ def __init__(self, val):
+ self.array = val
+ self.svn_merge_range_t = gdb.lookup_type('svn_merge_range_t')
+
+ def to_string(self):
+ if not self.array:
+ return 'NULL'
+
+ s = ''
+ for key, val in children_of_apr_array(self.array,
+ self.svn_merge_range_t.pointer()):
+ if s:
+ s += ','
+ s += SvnMergeRangePrinter(val).to_string()
+ return s
+
+ def display_hint(self):
+ if self.array:
+ return 'string'
+
+class SvnMergeinfoPrinter:
+ """for svn_mergeinfo_t"""
+ def __init__(self, val):
+ self.hash_p = val
+ self.svn_rangelist_t = gdb.lookup_type('svn_rangelist_t')
+
+ def to_string(self):
+ if self.hash_p == 0:
+ return 'NULL'
+
+ s = ''
+ for key, val in children_of_apr_hash(self.hash_p,
+ self.svn_rangelist_t.pointer()):
+ if s:
+ s += '; '
+ s += key + ':' + SvnRangelistPrinter(val).to_string()
+ return '{ ' + s + ' }'
+
+class SvnMergeinfoCatalogPrinter:
+ """for svn_mergeinfo_catalog_t"""
+ def __init__(self, val):
+ self.hash_p = val
+ self.svn_mergeinfo_t = gdb.lookup_type('svn_mergeinfo_t')
+
+ def to_string(self):
+ if self.hash_p == 0:
+ return 'NULL'
+
+ s = ''
+ for key, val in children_of_apr_hash(self.hash_p,
+ self.svn_mergeinfo_t):
+ if s:
+ s += ',\n '
+ s += "'" + key + "': " + SvnMergeinfoPrinter(val).to_string()
+ return '{ ' + s + ' }'
+
+########################################################################
+
+# Pretty-printing for Subversion libsvn_client types.
+
+class SvnPathrevPrinter:
+ """for svn_client__pathrev_t"""
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ if not self.val:
+ return 'NULL'
+
+ rev = int(self.val['rev'])
+ url = self.val['url'].string()
+ repos_root_url = self.val['repos_root_url'].string()
+ relpath = url[len(repos_root_url):]
+ return "%s@%d" % (relpath, rev)
+
+ def display_hint(self):
+ if self.val:
+ return 'string'
+
+
+########################################################################
+
+libapr_printer = None
+libsvn_printer = None
+
+def build_libsvn_printers():
+ """Construct the pretty-printer objects."""
+
+ global libapr_printer, libsvn_printer
+
+ libapr_printer = TypedefRegexCollectionPrettyPrinter("libapr")
+ libapr_printer.add_printer('apr_hash_t', r'^apr_hash_t$',
+ AprHashPrinter)
+ libapr_printer.add_printer('apr_array_header_t', r'^apr_array_header_t$',
+ AprArrayPrinter)
+
+ libsvn_printer = TypedefRegexCollectionPrettyPrinter("libsvn")
+ libsvn_printer.add_printer('svn_boolean_t', r'^svn_boolean_t$',
+ SvnBooleanPrinter)
+ libsvn_printer.add_printer('svn_string_t', r'^svn_string_t$',
+ SvnStringPrinter)
+ libsvn_printer.add_printer('svn_client__pathrev_t', r'^svn_client__pathrev_t$',
+ SvnPathrevPrinter)
+ libsvn_printer.add_printer('svn_merge_range_t', r'^svn_merge_range_t$',
+ SvnMergeRangePrinter)
+ libsvn_printer.add_printer('svn_rangelist_t', r'^svn_rangelist_t$',
+ SvnRangelistPrinter)
+ libsvn_printer.add_printer('svn_mergeinfo_t', r'^svn_mergeinfo_t$',
+ SvnMergeinfoPrinter)
+ libsvn_printer.add_printer('svn_mergeinfo_catalog_t', r'^svn_mergeinfo_catalog_t$',
+ SvnMergeinfoCatalogPrinter)
+
+
+def register_libsvn_printers(obj):
+ """Register the pretty-printers for the object file OBJ."""
+
+ global libapr_printer, libsvn_printer
+
+ # Printers registered later take precedence.
+ gdb.printing.register_pretty_printer(obj, libapr_printer)
+ gdb.printing.register_pretty_printer(obj, libsvn_printer)
+
+
+# Construct the pretty-printer objects, once, at GDB start-up time when this
+# Python module is loaded. (Registration happens later, once per object
+# file.)
+build_libsvn_printers()
diff --git a/tools/dev/gen-py-errors.py b/tools/dev/gen-py-errors.py
index 6204589..9ca0365 100755
--- a/tools/dev/gen-py-errors.py
+++ b/tools/dev/gen-py-errors.py
@@ -23,44 +23,87 @@
# ====================================================================
#
#
-# Meant to be run from the root of a Subversion working copy. If anybody
-# wants to do some path magic to improve that use, feel free.
-
-import sys, os
-sys.path.append(os.path.join('subversion', 'bindings', 'swig',
- 'python', 'tests'))
-
+# Locates svn_error_codes.h based on its relative location to this script.
+#
+# Generates to STDOUT. Typically, redirect this into svntest/err.py
+#
-import setup_path
+import sys
+import os
+import re
-header = '''#!/usr/bin/env python
-### This file automatically generated by tools/dev/gen-py-error.py,
+HEADER = '''#!/usr/bin/env python
+### This file automatically generated by tools/dev/gen-py-errors.py,
### which see for more information
###
### It is versioned for convenience.
-
'''
+# Established by svn 1.0. May as well hard-code these.
+APR_OS_START_ERROR = 20000
+APR_OS_START_USERERR = APR_OS_START_ERROR + 50000 * 2
+SVN_ERR_CATEGORY_SIZE = 5000
+
+RE_CAT_NAME = re.compile(r'SVN_ERR_([A-Z_]+)_CATEG')
+RE_CAT_VALUE = re.compile(r'\d+')
+
+RE_DEF_NAME = re.compile(r'SVN_ERRDEF\(([A-Z0-9_]+)')
+RE_DEF_VALUE = re.compile(r'SVN_ERR_([A-Z_]+)_CATEG[^0-9]*([0-9]+)')
+
+
+def write_output(codes):
+ print HEADER
+
+ for name, value in codes:
+ # skip SVN_ERR_ on the name
+ print '%s = %d' % (name[8:], value)
+
+
+def main(codes_fname):
+ categ = { }
+ codes = [ ]
-def write_output(errs, filename):
- out = open(filename, 'w')
- out.write(header)
+ f = open(codes_fname)
- for name, val in errs:
- out.write('%s = %d\n' % (name, val))
+ # Parse all the category start values
+ while True:
+ line = f.next()
+ m = RE_CAT_NAME.search(line)
+ if m:
+ name = m.group(1)
+ m = RE_CAT_VALUE.search(f.next())
+ assert m
+ value = int(m.group(0))
+ categ[name] = APR_OS_START_USERERR + value * SVN_ERR_CATEGORY_SIZE
- out.close()
+ elif line.strip() == 'SVN_ERROR_START':
+ break
+ # Parse each of the error values
+ while True:
+ line = f.next()
+ m = RE_DEF_NAME.search(line)
+ if m:
+ name = m.group(1)
+ line = f.next()
+ m = RE_DEF_VALUE.search(line)
+ if not m:
+ # SVN_ERR_WC_NOT_DIRECTORY is defined as equal to NOT_WORKING_COPY
+ # rather than relative to SVN_ERR_WC_CATEGORY_START
+ #print 'SKIP:', line
+ continue
+ cat = m.group(1)
+ value = int(m.group(2))
+ codes.append((name, categ[cat] + value))
-def main(output_filename):
- import core
+ elif line.strip() == 'SVN_ERROR_END':
+ break
- errs = [e for e in dir(core.svn.core) if e.startswith('SVN_ERR_')]
- codes = []
- for e in errs:
- codes.append((e[8:], getattr(core.svn.core, e)))
- write_output(codes, output_filename)
+ write_output(sorted(codes))
if __name__ == '__main__':
- main(os.path.join('subversion', 'tests', 'cmdline', 'svntest', 'err.py'))
+ this_dir = os.path.dirname(os.path.abspath(__file__))
+ codes_fname = os.path.join(this_dir, os.path.pardir, os.path.pardir,
+ 'subversion', 'include', 'svn_error_codes.h')
+ main(codes_fname)
diff --git a/tools/dev/histogram.py b/tools/dev/histogram.py
new file mode 100755
index 0000000..4b977fa
--- /dev/null
+++ b/tools/dev/histogram.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import sys
+import operator
+
+
+def count(infile):
+ # infile should be a simple file with author names on each line
+ counts = {}
+ for line in infile:
+ author = line.strip()
+ counts[author] = counts.get(author, 0) + 1
+
+ return counts
+
+
+def histogram(counts, width):
+ max_len = max([len(author) for author in counts.keys()])
+ max_count = max(counts.values())
+
+ adjustor = float(max_count) / (width - max_len - 3)
+
+ for author, count in sorted(counts.items(),
+ key=operator.itemgetter(1), # sort on count
+ reverse=True):
+ print "%-*s | %s" % (max_len, author, "X"*int(count/adjustor))
+
+
+if __name__ == '__main__':
+ if len(sys.argv) < 2:
+ ### TODO: Automagically determine terminal width
+ width = 80
+ else:
+ width = int(sys.argv[1])
+ histogram(count(sys.stdin), width)
diff --git a/tools/dev/merge-graph.py b/tools/dev/merge-graph.py
new file mode 100755
index 0000000..7f7ced2
--- /dev/null
+++ b/tools/dev/merge-graph.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+
+args_message = '[-f png|svg|gif|dia... [-f ...]] GRAPH_CONFIG_FILE...'
+help_message = """Produce pretty graphs representing branches and merging.
+For each config file specified, construct a graph and write it as a PNG file
+(or other graphical file formats)."""
+
+import sys
+import getopt
+from mergegraph import MergeDot
+
+
+# If run as a program, process each input filename as a graph config file.
+if __name__ == '__main__':
+ optlist, args = getopt.getopt(sys.argv[1:], 'f:', ['format'])
+
+ prog_name = sys.argv[0]
+ if not args:
+ usage = '%s: usage: "%s %s"' % (prog_name, prog_name, args_message)
+ print >> sys.stderr, usage
+ sys.exit(1)
+
+ formats = []
+
+ for opt, opt_arg in optlist:
+ if opt == '-f':
+ formats.append(opt_arg)
+
+ if not formats:
+ formats.append('png')
+
+ for config_filename in args:
+ print "%s: reading '%s'," % (prog_name, config_filename),
+ graph = MergeDot(config_filename, rankdir='LR', dpi='72')
+ for format in formats:
+ filename = '%s.%s' % (graph.basename, format)
+ print "writing '%s'" % filename,
+ graph.save(format=format, filename=filename)
+ print
diff --git a/tools/dev/mergegraph/__init__.py b/tools/dev/mergegraph/__init__.py
new file mode 100644
index 0000000..6dfb7d9
--- /dev/null
+++ b/tools/dev/mergegraph/__init__.py
@@ -0,0 +1,20 @@
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+
+from mergegraph import MergeDot, MergeGraph, MergeSubgraph
diff --git a/tools/dev/mergegraph/mergegraph.py b/tools/dev/mergegraph/mergegraph.py
new file mode 100644
index 0000000..834b164
--- /dev/null
+++ b/tools/dev/mergegraph/mergegraph.py
@@ -0,0 +1,313 @@
+#!/usr/bin/env python
+
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+
+# Config file format:
+example = """
+ [graph]
+ filename = merge-sync-1.png
+ title = Sync Merge: CC vs SVN
+ # Branches: (branch name, branched from node, first rev, last rev).
+ branches = [
+ ('A', 'O0', 1, 4),
+ ('O', None, 0, 0),
+ ('B', 'O0', 1, 5)
+ ]
+ # Changes: nodes in which a change was committed; merge targets need not
+ # be listed here.
+ changes = [
+ 'A1', 'A2', 'A3', 'A4',
+ 'B1', 'B2', 'B3', 'B4', 'B5'
+ ]
+ # Merges: (base node, source-right node, target node, label).
+ # Base is also known as source-left.
+ merges = [
+ ('O0', 'A:1', 'B3', 'sync'),
+ ('A2', 'A:3', 'B5', 'sync'),
+ ]
+ # Annotations for nodes: (node, annotation text).
+ annotations = [
+ ('A2', 'cc:YCA')
+ ]
+"""
+
+# Notes about different kinds of merge.
+#
+# A basic 3-way merge is ...
+#
+# The ClearCase style of merge is a 3-way merge.
+#
+# The Subversion style of merge (that is, one phase of a Subversion merge)
+# is a three-way merge with its base (typically the YCA) on the source branch.
+
+
+import sys
+import pydot
+from pydot import Node, Edge
+
+
+def mergeinfo_to_node_list(mi):
+ """Convert a mergeinfo string such as '/foo:1,3-5*' into a list of
+ node names such as ['foo1', 'foo3', 'foo4', 'foo5'].
+ """
+ ### Doesn't yet strip the leading slash.
+ l = []
+ if mi:
+ for mi_str in mi.split(' '):
+ path, ranges = mi_str.split(':')
+ for r in ranges.split(','):
+ if r.endswith('*'):
+ # TODO: store & use this 'non-inheritable' flag
+ # Remove the flag
+ r = r[:-1]
+ rlist = r.split('-')
+ r1 = int(rlist[0])
+ if len(rlist) == 2:
+ r2 = int(rlist[1])
+ else:
+ r2 = r1
+ for rev in range(r1, r2 + 1):
+ l.append(path + str(rev))
+ return l
+
+
+class MergeGraph(pydot.Graph):
+ """Base class, not intended for direct use. Use MergeDot for the main
+ graph and MergeSubgraph for a subgraph.
+ """
+
+ def mk_origin_node(graph, name, label):
+ """Add a node to the graph"""
+ graph.add_node(Node(name, label=label, shape='plaintext'))
+
+ def mk_invis_node(graph, name):
+ """Add a node to the graph"""
+ graph.add_node(Node(name, style='invis'))
+
+ def mk_node(graph, name, label=None):
+ """Add a node to the graph, if not already present"""
+ if not graph.get_node(name):
+ if not label:
+ label = name
+ if name in graph.changes:
+ graph.add_node(Node(name, label=label))
+ else:
+ graph.add_node(Node(name, color='grey', label=''))
+
+ def mk_merge_target(graph, target_node, important):
+ """Add a merge target node to the graph."""
+ if important:
+ color = 'red'
+ else:
+ color = 'black'
+ graph.add_node(Node(target_node, color=color, fontcolor=color, style='bold'))
+
+ def mk_edge(graph, name1, name2, **attrs):
+ """Add an ordinary edge to the graph"""
+ graph.add_edge(Edge(name1, name2, dir='none', style='dotted', color='grey', **attrs))
+
+ def mk_br_edge(graph, name1, name2):
+ """Add a branch-creation edge to the graph"""
+ # Constraint=false to avoid the Y-shape skewing the nice parallel branch lines
+ graph.mk_edge(name1, name2, constraint='false')
+
+ def mk_merge_edge(graph, src_node, tgt_node, kind, label, important):
+ """Add a merge edge to the graph"""
+ if important:
+ color = 'red'
+ else:
+ color = 'grey'
+ e = Edge(src_node, tgt_node, constraint='false',
+ label='"' + label + '"',
+ color=color, fontcolor=color,
+ style='bold')
+ if kind.startswith('cherry'):
+ e.set_style('dashed')
+ graph.add_edge(e)
+
+ def mk_mergeinfo_edge(graph, base_node, src_node, important):
+ """"""
+ if important:
+ color = 'red'
+ else:
+ color = 'grey'
+ graph.add_edge(Edge(base_node, src_node,
+ dir='both', arrowtail='odot', arrowhead='tee',
+ color=color, constraint='false'))
+
+ def mk_invis_edge(graph, name1, name2):
+ """Add an invisible edge to the graph"""
+ graph.add_edge(Edge(name1, name2, style='invis'))
+
+ def add_merge(graph, merge, important):
+ """Add a merge"""
+ base_node, src_node, tgt_node, kind = merge
+
+    if base_node and src_node:  # and not kind.startswith('cherry'):
+ graph.mk_mergeinfo_edge(base_node, src_node, important)
+
+ # Merge target node
+ graph.mk_merge_target(tgt_node, important)
+
+ # Merge edge
+ graph.mk_merge_edge(src_node, tgt_node, kind, kind, important)
+
+ def add_annotation(graph, node, label, color='lightblue'):
+ """Add a graph node that serves as an annotation to a normal node.
+ More than one annotation can be added to the same normal node.
+ """
+ subg_name = node + '_annotations'
+
+ def get_subgraph(graph, name):
+ """Equivalent to pydot.Graph.get_subgraph() when there is no more than
+    one subgraph of the given name, but working around a bug in
+ pydot.Graph.get_subgraph().
+ """
+ for subg in graph.get_subgraph_list():
+ if subg.get_name() == name:
+ return subg
+ return None
+
+ g = get_subgraph(graph, subg_name)
+ if not g:
+ g = pydot.Subgraph(subg_name, rank='same')
+ graph.add_subgraph(g)
+
+ ann_node = node + '_'
+ while g.get_node(ann_node):
+ ann_node = ann_node + '_'
+ g.add_node(Node(ann_node, shape='box', style='filled', color=color,
+ label='"' + label + '"'))
+ g.add_edge(Edge(ann_node, node, style='solid', color=color,
+ dir='none', constraint='false'))
+
+class MergeSubgraph(MergeGraph, pydot.Subgraph):
+ """"""
+ def __init__(graph, **attrs):
+ """"""
+ MergeGraph.__init__(graph)
+ pydot.Subgraph.__init__(graph, **attrs)
+
+class MergeDot(MergeGraph, pydot.Dot):
+ """
+ # TODO: In the 'merges' input, find the predecessor automatically.
+ """
+ def __init__(graph, config_filename=None,
+ filename=None, title=None, branches=None, changes=None,
+ merges=[], annotations=[], **attrs):
+ """Return a new MergeDot graph generated from a config file or args."""
+ MergeGraph.__init__(graph)
+ pydot.Dot.__init__(graph, **attrs)
+
+ if config_filename:
+ graph.read_config(config_filename)
+ else:
+ graph.filename = filename
+ graph.title = title
+ graph.branches = branches
+ graph.changes = changes
+ graph.merges = merges
+ graph.annotations = annotations
+
+ graph.construct()
+
+ def read_config(graph, config_filename):
+ """Initialize a MergeDot graph's input data from a config file."""
+ import ConfigParser
+ if config_filename.endswith('.txt'):
+ default_basename = config_filename[:-4]
+ else:
+ default_basename = config_filename
+
+ config = ConfigParser.SafeConfigParser({ 'basename': default_basename,
+ 'title': None,
+ 'merges': '[]',
+ 'annotations': '[]' })
+ files_read = config.read(config_filename)
+ if len(files_read) == 0:
+ print >> sys.stderr, 'graph: unable to read graph config from "' + config_filename + '"'
+ sys.exit(1)
+ graph.basename = config.get('graph', 'basename')
+ graph.title = config.get('graph', 'title')
+ graph.branches = eval(config.get('graph', 'branches'))
+ graph.changes = eval(config.get('graph', 'changes'))
+ graph.merges = eval(config.get('graph', 'merges'))
+ graph.annotations = eval(config.get('graph', 'annotations'))
+
+ def construct(graph):
+ """"""
+ # Origin nodes (done first, in an attempt to set the order)
+ for br, orig, r1, head in graph.branches:
+ name = br + '0'
+ if r1 > 0:
+ graph.mk_origin_node(name, br)
+ else:
+ graph.mk_node(name, label=br)
+
+ # Edges and target nodes for merges
+ for merge in graph.merges:
+ # Emphasize the last merge, as it's the important one
+ important = (merge == graph.merges[-1])
+ graph.add_merge(merge, important)
+
+ # Parallel edges for basic lines of descent
+ for br, orig, r1, head in graph.branches:
+ sub_g = MergeSubgraph(ordering='out')
+ for i in range(1, head + 1):
+ prev_n = br + str(i - 1)
+ this_n = br + str(i)
+
+ # Normal edges and nodes
+ if i < r1:
+ graph.mk_invis_node(this_n)
+ else:
+ graph.mk_node(this_n)
+ if i <= r1:
+ graph.mk_invis_edge(prev_n, this_n)
+ else:
+ graph.mk_edge(prev_n, this_n)
+
+ # Branch creation edges
+ if orig:
+ sub_g.mk_br_edge(orig, br + str(r1))
+
+ graph.add_subgraph(sub_g)
+
+ # Annotations
+ for node, label in graph.annotations:
+ graph.add_annotation(node, label)
+
+ # A title for the graph (added last so it goes at the top)
+ if graph.title:
+ graph.add_node(Node('title', shape='plaintext', label='"' + graph.title + '"'))
+
+ def save(graph, format='png', filename=None):
+ """Save this merge graph to the given file format. If filename is None,
+ construct a filename from the basename of the original file (as passed
+ to the constructor and then stored in graph.basename) and the suffix
+ according to the given format.
+ """
+ if not filename:
+ filename = graph.basename + '.' + format
+ if format == 'sh':
+ import save_as_sh
+ save_as_sh.write_sh_file(graph, filename)
+ else:
+ pydot.Dot.write(graph, filename, format=format)
diff --git a/tools/dev/mergegraph/save_as_sh.py b/tools/dev/mergegraph/save_as_sh.py
new file mode 100644
index 0000000..3471538
--- /dev/null
+++ b/tools/dev/mergegraph/save_as_sh.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+
+
+# This module writes a sequence of 'svn' commands to a file, that when
+# run will perform the branching and merging described by a given MergeDot
+# graph description object.
+
+
+def shebang_line(out):
+ print >> out, '#!/bin/sh'
+
+def command(out, cmd, *args):
+ """Write the shell command CMD with the arguments ARGS to the file-like
+ object OUT.
+ """
+ print >> out, ' '.join((cmd,) + args)
+
+def svn(out, subcmd, *args):
+ """Write an svn command with the given subcommand and arguments. Write
+ to the file-like object OUT.
+ """
+ command(out, 'svn', subcmd, *args)
+
+def comment(out, text):
+ """Write the comment TEXT to the file-like object OUT.
+ """
+ print >> out, '#', text
+
+def node_branch(node_name):
+ """Extract branch name from a node name.
+ ### TODO: multi-char names.
+ """
+ return node_name[:1]
+
+def node_url(node_name):
+ """Extract the URL (in command-line repo-relative URL syntax) from a
+ node name.
+ """
+ return '^/' + node_branch(node_name)
+
+def node_rev(node_name):
+ """Extract revnum (as an integer) from a node name.
+ ### TODO: multi-char names.
+ """
+ return int(node_name[1:]) + 1
+
+def add(revs, node_name, action, *args):
+ """Add the tuple (ACTION, (ARGS)) to the list REVS[REVNUM].
+ """
+ revnum = node_rev(node_name)
+ if not revnum in revs:
+ revs[revnum] = []
+ revs[revnum].append((action, args))
+
+def write_recipe(graph, out):
+ """Write out a sequence of svn commands that will execute the branching
+ and merging shown in GRAPH. Write to the file-like object OUT.
+ """
+ revs = {} # keyed by revnum
+
+ for br, orig, r1, head in graph.branches:
+ if orig:
+ add(revs, br + str(r1), 'copy', orig, br)
+ else:
+ add(revs, br + str(r1), 'mkproj', br)
+
+ for base_node, src_node, tgt_node, kind in graph.merges:
+ add(revs, tgt_node, 'merge', src_node, tgt_node, kind)
+
+ for node_name in graph.changes:
+ # Originally the 'changes' list could have entries that overlapped with
+ # merges. We must either disallow that or filter out such changes here.
+ #if not node_name in revs:
+ add(revs, node_name, 'modify', node_name)
+
+ # Execute the actions for each revision in turn.
+ for r in sorted(revs.keys()):
+ comment(out, 'start r' + str(r))
+ for action, params in revs[r]:
+ #comment(out, '(' + action + ' ' + params + ')')
+ if action == 'mkproj':
+ (br,) = params
+ svn(out, 'mkdir', br, br + '/created_in_' + br)
+ elif action == 'copy':
+ (orig, br) = params
+ svn(out, 'copy', '-r' + str(node_rev(orig)), node_branch(orig), br)
+ elif action == 'modify':
+ (node_name,) = params
+ svn(out, 'mkdir', node_branch(node_name) + '/new_in_' + node_name)
+ elif action == 'merge':
+ (src_node, tgt_node, kind) = params
+ assert node_rev(tgt_node) == r
+ svn(out, 'update')
+ if kind == 'cherry':
+ svn(out, 'merge',
+ '-c' + str(node_rev(src_node)), node_url(src_node),
+ node_branch(tgt_node))
+ elif kind.startswith('reint'):
+ svn(out, 'merge', '--reintegrate',
+ node_url(src_node) + '@' + str(node_rev(src_node)),
+ node_branch(tgt_node))
+ else:
+ svn(out, 'merge',
+ node_url(src_node) + '@' + str(node_rev(src_node)),
+ node_branch(tgt_node))
+ else:
+ raise Exception('unknown action: %s' % action)
+ svn(out, 'commit', '-m', 'r' + str(r))
+
+def write_sh_file(graph, filename):
+ """Write a file containing a sequence of 'svn' commands that when run will
+ perform the branching and merging described by the MergeDot object
+ GRAPH. Write to a new file named FILENAME.
+ """
+ out_stream = open(filename, 'w')
+ shebang_line(out_stream)
+ write_recipe(graph, out_stream)
+ out_stream.close()
diff --git a/tools/dev/po-merge.py b/tools/dev/po-merge.py
index be515bb..15f0897 100755
--- a/tools/dev/po-merge.py
+++ b/tools/dev/po-merge.py
@@ -52,7 +52,7 @@ def parse_translation(f):
line = f.readline()
if line[0] != '"':
break
- msgid += '\n' + line[:-1]
+ msgid = msgid[:-1] + line[1:-1]
# Parse optional msgid_plural
msgid_plural = None
@@ -64,7 +64,7 @@ def parse_translation(f):
line = f.readline()
if line[0] != '"':
break
- msgid_plural += '\n' + line[:-1]
+ msgid_plural = msgid_plural[:-1] + line[1:-1]
# Parse msgstr
msgstr = []
@@ -117,9 +117,9 @@ def main(argv):
argv0 = os.path.basename(argv[0])
sys.exit('Usage: %s <lang.po>\n'
'\n'
- 'This script will replace the translations and flags in lang.po with\n'
- 'the translations and flags in the source po file read from standard\n'
- 'input. Strings that are not found in the source file are left untouched.\n'
+ 'This script will replace the translations and flags in lang.po (LF line endings)\n'
+ 'with the translations and flags in the source po file read from standard input.\n'
+ 'Strings that are not found in the source file are left untouched.\n'
'A backup copy of lang.po is saved as lang.po.bak.\n'
'\n'
'Example:\n'
diff --git a/tools/dev/remove-trailing-whitespace.sh b/tools/dev/remove-trailing-whitespace.sh
new file mode 100755
index 0000000..440dfaa
--- /dev/null
+++ b/tools/dev/remove-trailing-whitespace.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+ for ext in c h cpp java py pl rb hpp cmd bat; do
+ find . -name "*.$ext" -exec \
+ perl -pi -e 's/[ \t]*$//' {} + ;
+        # match only space/tab (not \s) so ^L pagebreaks are kept
+ done
diff --git a/tools/dev/sbox-ospath.py b/tools/dev/sbox-ospath.py
new file mode 100755
index 0000000..e510cd5
--- /dev/null
+++ b/tools/dev/sbox-ospath.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# USAGE:
+# $ ./sbox-ospath.py FILENAME
+#
+# This script will look for all lines in the file that use an expression
+# that looks like:
+# os.path.join(wc_dir, 'A', 'B')
+#
+# and rewrite that to:
+# sbox.ospath('A/B')
+#
+# Obviously, this relies heavily on standard naming for the variables in
+# our testing code. Visual inspection (and execution!) should be performed.
+#
+# The file is rewritten in place.
+#
+
+import sys
+import os
+import re
+
+RE_FIND_JOIN = re.compile(r'os\.path\.join\((?:sbox\.)?wc_dir, '
+ r'(["\'][^"\']*["\'](?:, ["\'][^"\']*["\'])*)\)')
+
+
+def rewrite_file(fname):
+ count = 0
+ lines = open(fname).readlines()
+ for i in range(len(lines)):
+ line = lines[i]
+ match = RE_FIND_JOIN.search(line)
+ if match:
+ start, end = match.span()
+ parts = match.group(1).replace('"', "'").replace("', '", '/')
+ lines[i] = line[:start] + 'sbox.ospath(' + parts + ')' + line[end:]
+ count += 1
+ if count == 0:
+ print 'No changes.'
+ else:
+ open(fname, 'w').writelines(lines)
+ print '%s rewrites performed.' % (count,)
+
+
+if __name__ == '__main__':
+ rewrite_file(sys.argv[1])
diff --git a/tools/dev/svnraisetreeconflict/main.c b/tools/dev/svnraisetreeconflict/svnraisetreeconflict.c
index 752aae6..aa39816 100644
--- a/tools/dev/svnraisetreeconflict/main.c
+++ b/tools/dev/svnraisetreeconflict/svnraisetreeconflict.c
@@ -43,6 +43,7 @@
#include "svn_version.h"
#include "private/svn_wc_private.h"
+#include "private/svn_cmdline_private.h"
#include "svn_private_config.h"
@@ -67,8 +68,8 @@
static svn_error_t *
version(apr_pool_t *pool)
{
- return svn_opt_print_help3(NULL, "svnraisetreeconflict", TRUE, FALSE, NULL,
- NULL, NULL, NULL, NULL, NULL, pool);
+ return svn_opt_print_help4(NULL, "svnraisetreeconflict", TRUE, FALSE, FALSE,
+ NULL, NULL, NULL, NULL, NULL, NULL, pool);
}
static void
@@ -218,10 +219,10 @@ raise_tree_conflict(int argc, const char **argv, apr_pool_t *pool)
/* Allocate and fill in the description data structures */
SVN_ERR(svn_dirent_get_absolute(&wc_abspath, wc_path, pool));
- left = svn_wc_conflict_version_create(repos_url1, path_in_repos1, peg_rev1,
- kind1, pool);
- right = svn_wc_conflict_version_create(repos_url2, path_in_repos2, peg_rev2,
- kind2, pool);
+ left = svn_wc_conflict_version_create2(repos_url1, NULL, path_in_repos1,
+ peg_rev1, kind1, pool);
+ right = svn_wc_conflict_version_create2(repos_url2, NULL, path_in_repos2,
+ peg_rev2, kind2, pool);
c = svn_wc_conflict_description_create_tree2(wc_abspath, kind,
operation, left, right, pool);
c->action = (svn_wc_conflict_action_t)action;
@@ -308,15 +309,14 @@ check_lib_versions(void)
{ "svn_wc", svn_wc_version },
{ NULL, NULL }
};
-
SVN_VERSION_DEFINE(my_version);
+
return svn_ver_check_list(&my_version, checklist);
}
int
main(int argc, const char *argv[])
{
- apr_allocator_t *allocator;
apr_pool_t *pool;
svn_error_t *err;
apr_getopt_t *os;
@@ -336,13 +336,7 @@ main(int argc, const char *argv[])
/* Create our top-level pool. Use a separate mutexless allocator,
* given this application is single threaded.
*/
- if (apr_allocator_create(&allocator))
- return EXIT_FAILURE;
-
- apr_allocator_max_free_set(allocator, SVN_ALLOCATOR_RECOMMENDED_MAX_FREE);
-
- pool = svn_pool_create_ex(NULL, allocator);
- apr_allocator_owner_set(allocator, pool);
+ pool = apr_allocator_owner_get(svn_pool_create_allocator(FALSE));
/* Check library versions */
err = check_lib_versions();
@@ -372,10 +366,8 @@ main(int argc, const char *argv[])
if (APR_STATUS_IS_EOF(status))
break;
if (status != APR_SUCCESS)
- {
- usage(pool);
- return EXIT_FAILURE;
- }
+ usage(pool); /* this will exit() */
+
switch (opt)
{
case 'h':
@@ -386,8 +378,7 @@ main(int argc, const char *argv[])
exit(0);
break;
default:
- usage(pool);
- return EXIT_FAILURE;
+ usage(pool); /* this will exit() */
}
}
@@ -403,10 +394,7 @@ main(int argc, const char *argv[])
}
if (remaining_argv->nelts < 1)
- {
- usage(pool);
- return EXIT_FAILURE;
- }
+ usage(pool); /* this will exit() */
/* Do the main task */
SVNRAISETC_INT_ERR(raise_tree_conflict(remaining_argv->nelts,
diff --git a/tools/dev/unix-build/Makefile.svn b/tools/dev/unix-build/Makefile.svn
index 0c6615d..0bdddd5 100644
--- a/tools/dev/unix-build/Makefile.svn
+++ b/tools/dev/unix-build/Makefile.svn
@@ -29,9 +29,13 @@
# | the bot's health after making changes to this file. |
# |______________________________________________________________|
-ENABLE_PYTHON_BINDINGS ?= yes
ENABLE_PERL_BINDINGS ?= yes
-ENABLE_JAVA_BINDINGS ?= no # they don't build with thread-less APR...
+THREADING ?= yes
+ifeq ($(THREADING),yes)
+ENABLE_JAVA_BINDINGS ?= yes
+else
+ENABLE_JAVA_BINDINGS ?= no
+endif
USE_APR_ICONV ?= no # set to yes to use APR iconv instead of GNU iconv
PARALLEL ?= 1
CLEANUP ?= 1
@@ -61,18 +65,21 @@ OBJDIR = $(PWD)/objdir
BDB_MAJOR_VER = 4.7
BDB_VER = $(BDB_MAJOR_VER).25
-APR_VER = 1.4.5
+APR_VER = 1.4.6
APR_ICONV_VER = 1.2.1
-GNU_ICONV_VER = 1.13.1
-APR_UTIL_VER = 1.3.12
-HTTPD_VER = 2.2.19
+GNU_ICONV_VER = 1.14
+APR_UTIL_VER = 1.4.1
+HTTPD_VER = 2.2.22
NEON_VER = 0.29.6
-SERF_VER = 0.7.x
+SERF_VER = 1.2.0
SERF_OLD_VER = 0.3.1
-CYRUS_SASL_VER = 2.1.23
-SQLITE_VER = 3070603
-LIBMAGIC_VER = 5.07
-RUBY_VER = 1.8.7-p334
+CYRUS_SASL_VER = 2.1.25
+SQLITE_VER = 3071600
+LIBMAGIC_VER = 5.11
+RUBY_VER = 1.8.7-p358
+BZ2_VER = 1.0.6
+PYTHON_VER = 2.7.3
+JUNIT_VER = 4.10
BDB_DIST = db-$(BDB_VER).tar.gz
APR_ICONV_DIST = apr-iconv-$(APR_ICONV_VER).tar.gz
@@ -83,6 +90,9 @@ CYRUS_SASL_DIST = cyrus-sasl-$(CYRUS_SASL_VER).tar.gz
HTTPD_DIST = httpd-$(HTTPD_VER).tar.bz2
LIBMAGIC_DIST = file-$(LIBMAGIC_VER).tar.gz
RUBY_DIST = ruby-$(RUBY_VER).tar.gz
+BZ2_DIST = bzip2-$(BZ2_VER).tar.gz
+PYTHON_DIST = Python-$(PYTHON_VER).tgz
+JUNIT_DIST = junit-${JUNIT_VER}.jar
DISTFILES = $(DISTDIR)/$(NEON_DIST) \
$(DISTDIR)/$(SERF_DIST) \
@@ -92,7 +102,10 @@ DISTFILES = $(DISTDIR)/$(NEON_DIST) \
$(DISTDIR)/$(GNU_ICONV_DIST) \
$(DISTDIR)/$(CYRUS_SASL_DIST) \
$(DISTDIR)/$(LIBMAGIC_DIST) \
- $(DISTDIR)/$(RUBY_DIST)
+ $(DISTDIR)/$(RUBY_DIST) \
+ $(DISTDIR)/$(BZ2_DIST) \
+ $(DISTDIR)/$(PYTHON_DIST) \
+ $(DISTDIR)/$(JUNIT_DIST)
FETCH_CMD = wget -c
@@ -105,12 +118,16 @@ APR_UTIL_URL = http://svn.apache.org/repos/asf/apr/apr-util
HTTPD_URL = http://archive.apache.org/dist/httpd/$(HTTPD_DIST)
NEON_URL = http://webdav.org/neon/$(NEON_DIST)
#SERF_URL = http://serf.googlecode.com/files/$(SERF_DIST)
-SERF_URL = http://serf.googlecode.com/svn/branches/$(SERF_VER)
+SERF_URL = http://serf.googlecode.com/svn/tags/$(SERF_VER)
SERF_OLD_URL = http://serf.googlecode.com/svn/tags/$(SERF_OLD_VER)
-SQLITE_URL = http://www.sqlite.org/$(SQLITE_DIST)
+SQLITE_URL = http://www.sqlite.org/2013/$(SQLITE_DIST)
CYRUS_SASL_URL = ftp://ftp.andrew.cmu.edu/pub/cyrus-mail/$(CYRUS_SASL_DIST)
LIBMAGIC_URL = ftp://ftp.astron.com/pub/file/$(LIBMAGIC_DIST)
RUBY_URL = http://ftp.ruby-lang.org/pub/ruby/1.8/$(RUBY_DIST)
+BZ2_URL = http://bzip.org/$(BZ2_VER)/$(BZ2_DIST)
+PYTHON_URL = http://python.org/ftp/python/$(PYTHON_VER)/$(PYTHON_DIST)
+JUNIT_URL = http://cloud.github.com/downloads/KentBeck/junit/$(JUNIT_DIST)
+
BDB_SRCDIR = $(SRCDIR)/db-$(BDB_VER)
APR_SRCDIR = $(SRCDIR)/apr-$(APR_VER)
@@ -125,6 +142,8 @@ SQLITE_SRCDIR = $(SRCDIR)/sqlite-autoconf-$(SQLITE_VER)
CYRUS_SASL_SRCDIR = $(SRCDIR)/cyrus-sasl-$(CYRUS_SASL_VER)
LIBMAGIC_SRCDIR = $(SRCDIR)/file-$(LIBMAGIC_VER)
RUBY_SRCDIR = $(SRCDIR)/ruby-$(RUBY_VER)
+BZ2_SRCDIR = $(SRCDIR)/bzip2-$(BZ2_VER)
+PYTHON_SRCDIR = $(SRCDIR)/Python-$(PYTHON_VER)
SVN_SRCDIR = $(SVN_WC)
BDB_OBJDIR = $(OBJDIR)/db-$(BDB_VER)
@@ -140,6 +159,8 @@ SQLITE_OBJDIR = $(OBJDIR)/sqlite-$(SQLITE_VER)
CYRUS_SASL_OBJDIR = $(OBJDIR)/cyrus-sasl-$(CYRUS_SASL_VER)
LIBMAGIC_OBJDIR = $(OBJDIR)/file-$(LIBMAGIC_VER)
RUBY_OBJDIR = $(OBJDIR)/ruby-$(RUBY_VER)
+BZ2_OBJDIR = $(OBJDIR)/bzip2-$(BZ2_VER)
+PYTHON_OBJDIR = $(OBJDIR)/python-$(PYTHON_VER)
SVN_OBJDIR = $(OBJDIR)/$(SVN_REL_WC)
# Tweak this for out-of-tree builds. Note that running individual
@@ -151,6 +172,9 @@ ifdef PROFILE
PROFILE_CFLAGS=-pg
endif
+# We need this to make sure some targets below pick up the right libraries
+LD_LIBRARY_PATH=$(PREFIX)/apr/lib:$(PREFIX)/iconv/lib:$(PREFIX)/bdb/lib:$(PREFIX)/neon/lib:$(PREFIX)/serf/lib:$(PREFIX)/sqlite/lib:$(PREFIX)/cyrus-sasl/lib:$(PREFIX)/iconv/lib:$(PREFIX)/libmagic/lib:$(PREFIX)/ruby/lib:$(PREFIX)/python/lib:$(PREFIX)/svn-$(WC)/lib
+
#######################################################################
# Main targets.
#######################################################################
@@ -160,17 +184,19 @@ endif
all: dirs-create bdb-install apr-install iconv-install apr-util-install \
httpd-install neon-install serf-install serf-old-install \
sqlite-install cyrus-sasl-install libmagic-install \
- ruby-install svn-install svn-bindings-install
+ ruby-install bz2-install python-install \
+ svn-install svn-bindings-install
# Use these to start a build from the beginning.
reset: dirs-reset bdb-reset apr-reset iconv-reset apr-util-reset \
httpd-reset neon-reset serf-reset serf-old-reset sqlite-reset \
- cyrus-sasl-reset libmagic-reset ruby-reset svn-reset
+ cyrus-sasl-reset libmagic-reset ruby-reset python-reset \
+ bz2-reset svn-reset
# Use to save disk space.
clean: bdb-clean apr-clean iconv-clean apr-util-clean httpd-clean \
neon-clean serf-clean serf-old-clean sqlite-clean cyrus-sasl-clean \
- libmagic-clean ruby-clean svn-clean
+ libmagic-clean ruby-clean bz2-clean python-clean svn-clean
# Nukes everything (including installed binaries!)
# Use this to start ALL OVER AGAIN! Use with caution!
@@ -276,26 +302,26 @@ $(APR_OBJDIR)/.retrieved:
fi
touch $@
-ifdef THREADING
+ifeq ($(THREADING),yes)
THREADS_FLAG=--enable-threads
else
THREADS_FLAG=--disable-threads
endif
+ifdef POOL_DEBUG
+POOL_DEBUG_FLAG=--enable-pool-debug=all
+endif
+
# configure apr
$(APR_OBJDIR)/.configured: $(APR_OBJDIR)/.retrieved
- cp $(APR_SRCDIR)/build/apr_hints.m4 \
- $(APR_SRCDIR)/build/apr_hints.m4.orig
- cat $(APR_SRCDIR)/build/apr_hints.m4.orig \
- | sed -e '/^.*APR_ADDTO(CPPFLAGS, \[-D_POSIX_THREADS\]).*$$/d' \
- > $(APR_SRCDIR)/build/apr_hints.m4
cd $(APR_SRCDIR) && ./buildconf
cd $(APR_OBJDIR) \
&& env CFLAGS="-O0 -g $(PROFILE_CFLAGS)" GREP="`which grep`" \
$(APR_SRCDIR)/configure \
--prefix=$(PREFIX)/apr \
--enable-maintainer-mode \
- $(THREADS_FLAG)
+ $(THREADS_FLAG) \
+ $(POOL_DEBUG_FLAG)
touch $@
# compile apr
@@ -598,9 +624,19 @@ $(DISTDIR)/$(NEON_DIST):
cd $(DISTDIR) && $(FETCH_CMD) $(NEON_URL)
# retrieve neon
+NEON_SVN_URL=http://svn.webdav.org/repos/projects/neon/trunk
$(NEON_OBJDIR)/.retrieved: $(DISTDIR)/$(NEON_DIST)
[ -d $(NEON_OBJDIR) ] || mkdir -p $(NEON_OBJDIR)
tar -C $(SRCDIR) -zxf $(DISTDIR)/$(NEON_DIST)
+ # fix build with OpenSSL lacking SSLv2 support:
+ cd $(NEON_SRCDIR)/src && svn diff -c 1865 \
+ $(NEON_SVN_URL)/src/ne_openssl.c | patch -p0
+ cd $(NEON_SRCDIR)/src && svn diff -c 1872 \
+ $(NEON_SVN_URL)/src/ne_openssl.c | patch -p0
+ cd $(NEON_SRCDIR)/src && svn diff -c 1865 \
+ $(NEON_SVN_URL)/src/ne_ssl.h | patch -p0
+ cd $(NEON_SRCDIR)/src && svn diff -c 1865 \
+ $(NEON_SVN_URL)/src/ne_session.c | patch -p0
touch $@
# OpenBSD does not have krb5-config in PATH, but the neon port has
@@ -758,7 +794,7 @@ $(SQLITE_OBJDIR)/.retrieved: $(DISTDIR)/$(SQLITE_DIST)
tar -C $(SRCDIR) -zxf $(DISTDIR)/$(SQLITE_DIST)
touch $@
-ifdef THREADING
+ifeq ($(THREADING),yes)
THREADSAFE_FLAG=--enable-threadsafe
else
THREADSAFE_FLAG=--disable-threadsafe
@@ -820,6 +856,14 @@ ifeq ($(UNAME),OpenBSD)
done
chmod +x $(CYRUS_SASL_SRCDIR)/configure
endif
+ # Fixes excessive auth log spam from sasl if broken .la files exist
+ sed 's/SASL_LOG_WARN/SASL_LOG_DEBUG/' \
+ < $(CYRUS_SASL_SRCDIR)/lib/dlopen.c \
+ > $(CYRUS_SASL_SRCDIR)/lib/dlopen.c.patched
+ mv $(CYRUS_SASL_SRCDIR)/lib/dlopen.c.patched \
+ $(CYRUS_SASL_SRCDIR)/lib/dlopen.c
+ # Fix a weird autotools error about missing cmulocal dir
+ (cd $(CYRUS_SASL_SRCDIR)/saslauthd/ && ln -sf ../cmulocal)
touch $@
# configure cyrus-sasl
@@ -919,6 +963,12 @@ $(RUBY_OBJDIR)/.retrieved: $(DISTDIR)/$(RUBY_DIST)
tar -C $(SRCDIR) -zxf $(DISTDIR)/$(RUBY_DIST)
touch $@
+ifeq ($(THREADING),yes)
+THREADSAFE_FLAG=--enable-pthread
+else
+THREADSAFE_FLAG=--disable-pthread
+endif
+
# configure ruby
$(RUBY_OBJDIR)/.configured: $(RUBY_OBJDIR)/.retrieved
cd $(RUBY_OBJDIR) \
@@ -926,7 +976,7 @@ $(RUBY_OBJDIR)/.configured: $(RUBY_OBJDIR)/.retrieved
$(RUBY_SRCDIR)/configure \
--prefix=$(PREFIX)/ruby \
--enable-shared \
- --disable-pthread
+ $(THREADSAFE_FLAG)
touch $@
# compile ruby
@@ -934,12 +984,129 @@ $(RUBY_OBJDIR)/.compiled: $(RUBY_OBJDIR)/.configured
(cd $(RUBY_OBJDIR) && make)
touch $@
-# install ruby and the test-unit gem needed to run SVN bindings tests
+# install ruby
$(RUBY_OBJDIR)/.installed: $(RUBY_OBJDIR)/.compiled
(cd $(RUBY_OBJDIR) && make install)
touch $@
#######################################################################
+# bz2
+#######################################################################
+
+bz2-retrieve: $(BZ2_OBJDIR)/.retrieved
+bz2-compile: $(BZ2_OBJDIR)/.compiled
+bz2-install: $(BZ2_OBJDIR)/.installed
+bz2-reset:
+ $(foreach f, .retrieved .configured .compiled .installed, \
+ rm -f $(BZ2_OBJDIR)/$(f);)
+
+bz2-clean:
+ -(cd $(BZ2_SRCDIR) && make distclean)
+
+# fetch distfile for bz2
+$(DISTDIR)/$(BZ2_DIST):
+ cd $(DISTDIR) && $(FETCH_CMD) $(BZ2_URL)
+
+# retrieve bz2
+$(BZ2_OBJDIR)/.retrieved: $(DISTDIR)/$(BZ2_DIST)
+ [ -d $(BZ2_OBJDIR) ] || mkdir -p $(BZ2_OBJDIR)
+ tar -C $(SRCDIR) -zxf $(DISTDIR)/$(BZ2_DIST)
+ touch $@
+
+# compile bz2
+$(BZ2_OBJDIR)/.compiled: $(BZ2_OBJDIR)/.retrieved
+ (cd $(BZ2_SRCDIR) && make CFLAGS="-g $(PROFILE_CFLAGS) -fPIC")
+ touch $@
+
+# install bz2
+$(BZ2_OBJDIR)/.installed: $(BZ2_OBJDIR)/.compiled
+ (cd $(BZ2_SRCDIR) && make install PREFIX=$(PREFIX)/bz2)
+ touch $@
+
+
+#######################################################################
+# python
+#######################################################################
+
+python-retrieve: $(PYTHON_OBJDIR)/.retrieved
+python-configure: $(PYTHON_OBJDIR)/.configured
+python-compile: $(PYTHON_OBJDIR)/.compiled
+python-install: $(PYTHON_OBJDIR)/.installed
+python-reset:
+ $(foreach f, .retrieved .configured .compiled .installed, \
+ rm -f $(PYTHON_OBJDIR)/$(f);)
+
+python-clean:
+ -(cd $(PYTHON_OBJDIR) && make distclean)
+
+# fetch distfile for python
+$(DISTDIR)/$(PYTHON_DIST):
+ cd $(DISTDIR) && $(FETCH_CMD) $(PYTHON_URL)
+
+# retrieve python
+#
+$(PYTHON_OBJDIR)/.retrieved: $(DISTDIR)/$(PYTHON_DIST)
+ [ -d $(PYTHON_OBJDIR) ] || mkdir -p $(PYTHON_OBJDIR)
+ tar -C $(SRCDIR) -zxf $(DISTDIR)/$(PYTHON_DIST)
+ # Make setup.py use our own dependencies instead of system ones
+ sed -e "s#sqlite_inc_paths = \[ '/usr/include',#sqlite_inc_paths = [ '$(PREFIX)/sqlite/include',#" \
+ -e "s#'/usr/include/db4'#'$(PREFIX)/bdb/include'#" \
+ -e "s|\(add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')\)|#\1|" \
+ -e "s|\(add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')\)|#\1|" \
+ -e "s#find_library_file(lib_dirs, 'bz2'#find_library_file(['$(PREFIX)/bz2/lib'] + lib_dirs, 'bz2'#" \
+ < $(PYTHON_SRCDIR)/setup.py \
+ > $(PYTHON_SRCDIR)/setup.py.patched
+ mv $(PYTHON_SRCDIR)/setup.py.patched $(PYTHON_SRCDIR)/setup.py
+ chmod +x $(PYTHON_SRCDIR)/setup.py
+ # Fixes shared library linking on OpenBSD
+ # http://bugs.python.org/issue12560
+ # Also fix compilation error on OpenBSD 5.0 and later (undefined
+ # reference to 'lstat' -- already fixed in hg.python.org/cpython).
+ sed -e '4930s#NetBSD\*|#NetBSD*|OpenBSD*|#' \
+ -e 's#OpenBSD/4\.\[789\]#OpenBSD/*#' \
+ < $(PYTHON_SRCDIR)/configure \
+ > $(PYTHON_SRCDIR)/configure.patched
+ mv $(PYTHON_SRCDIR)/configure.patched $(PYTHON_SRCDIR)/configure
+ chmod +x $(PYTHON_SRCDIR)/configure
+ touch $@
+
+# configure python
+ifdef PROFILE
+PYTHON_PROFILING=--enable-profiling
+endif
+$(PYTHON_OBJDIR)/.configured: $(PYTHON_OBJDIR)/.retrieved
+ cd $(PYTHON_OBJDIR) \
+ && env CFLAGS="-g $(PROFILE_CFLAGS)" GREP="`which grep`" \
+ CPPFLAGS="-I$(PREFIX)/bz2/include" \
+ LDFLAGS="-Wl,-rpath=$(PREFIX)/python/lib -L$(PREFIX)/bz2/lib" \
+ $(PYTHON_SRCDIR)/configure \
+ --prefix=$(PREFIX)/python \
+ --enable-shared \
+ --with-system-expat \
+ --with-dbmliborder=bdb \
+ $(PYTHON_PROFILING)
+ touch $@
+
+# compile python
+$(PYTHON_OBJDIR)/.compiled: $(PYTHON_OBJDIR)/.configured
+ (cd $(PYTHON_OBJDIR) && make)
+ touch $@
+
+# install python
+$(PYTHON_OBJDIR)/.installed: $(PYTHON_OBJDIR)/.compiled
+ (cd $(PYTHON_OBJDIR) && make install)
+ touch $@
+
+
+#######################################################################
+# junit
+#######################################################################
+
+# fetch distfile for junit
+$(DISTDIR)/$(JUNIT_DIST):
+ cd $(DISTDIR) && $(FETCH_CMD) $(JUNIT_URL)
+
+#######################################################################
# svn
#######################################################################
@@ -983,12 +1150,22 @@ $(SVN_OBJDIR)/.retrieved:
fi
touch $@
-ifeq ($(BRANCH_MAJOR),1.6)
+ifeq ($(BRANCH_MAJOR),1.7)
+BDB_FLAG=db.h:$(PREFIX)/bdb/include:$(PREFIX)/bdb/lib:db-$(BDB_MAJOR_VER)
+SERF_FLAG=--with-serf="$(PREFIX)/serf"
+MOD_DAV_SVN=modules/svn-$(WC)/mod_dav_svn.so
+MOD_AUTHZ_SVN=modules/svn-$(WC)/mod_authz_svn.so
+LIBMAGIC_FLAG=--with-libmagic=$(PREFIX)/libmagic
+NEON_FLAG=--with-neon="$(PREFIX)/neon"
+JAVAHL_CHECK_TARGET=check-javahl
+else ifeq ($(BRANCH_MAJOR),1.6)
BDB_FLAG=db.h:$(PREFIX)/bdb/include:$(PREFIX)/bdb/lib:db-$(BDB_MAJOR_VER)
SERF_FLAG=--with-serf="$(PREFIX)/serf"
MOD_DAV_SVN=modules/svn-$(WC)/mod_dav_svn.so
MOD_AUTHZ_SVN=modules/svn-$(WC)/mod_authz_svn.so
W_NO_SYSTEM_HEADERS=-Wno-system-headers
+NEON_FLAG=--with-neon="$(PREFIX)/neon"
+JAVAHL_CHECK_TARGET=check-javahl
else ifeq ($(BRANCH_MAJOR),1.5)
BDB_FLAG=$(PREFIX)/bdb
SERF_FLAG=--with-serf="$(PREFIX)/serf-old"
@@ -996,49 +1173,22 @@ MOD_DAV_SVN=modules/mod_dav_svn.so
MOD_AUTHZ_SVN=modules/mod_authz_svn.so
DISABLE_NEON_VERSION_CHECK=--disable-neon-version-check
W_NO_SYSTEM_HEADERS=-Wno-system-headers
-else ifeq ($(BRANCH_MAJOR),1.4)
-BDB_FLAG=$(PREFIX)/bdb
-MOD_DAV_SVN=modules/mod_dav_svn.so
-MOD_AUTHZ_SVN=modules/mod_authz_svn.so
-DISABLE_NEON_VERSION_CHECK=--disable-neon-version-check
-W_NO_SYSTEM_HEADERS=-Wno-system-headers
-else ifeq ($(BRANCH_MAJOR),1.3)
-BDB_FLAG=$(PREFIX)/bdb
-MOD_DAV_SVN=modules/mod_dav_svn.so
-MOD_AUTHZ_SVN=modules/mod_authz_svn.so
-DISABLE_NEON_VERSION_CHECK=--disable-neon-version-check
-W_NO_SYSTEM_HEADERS=-Wno-system-headers
-else ifeq ($(BRANCH_MAJOR),1.2)
-BDB_FLAG=$(PREFIX)/bdb
-MOD_DAV_SVN=modules/mod_dav_svn.so
-MOD_AUTHZ_SVN=modules/mod_authz_svn.so
-DISABLE_NEON_VERSION_CHECK=--disable-neon-version-check
-W_NO_SYSTEM_HEADERS=-Wno-system-headers
-else ifeq ($(BRANCH_MAJOR),1.1)
-BDB_FLAG=$(PREFIX)/bdb
-MOD_DAV_SVN=modules/mod_dav_svn.so
-MOD_AUTHZ_SVN=modules/mod_authz_svn.so
-DISABLE_NEON_VERSION_CHECK=--disable-neon-version-check
-W_NO_SYSTEM_HEADERS=-Wno-system-headers
-else ifeq ($(BRANCH_MAJOR),1.0)
-BDB_FLAG=$(PREFIX)/bdb
-MOD_DAV_SVN=modules/mod_dav_svn.so
-MOD_AUTHZ_SVN=modules/mod_authz_svn.so
-DISABLE_NEON_VERSION_CHECK=--disable-neon-version-check
-W_NO_SYSTEM_HEADERS=-Wno-system-headers
-else
+NEON_FLAG=--with-neon="$(PREFIX)/neon"
+JAVAHL_CHECK_TARGET=check-javahl
+else # 1.8
BDB_FLAG=db.h:$(PREFIX)/bdb/include:$(PREFIX)/bdb/lib:db-$(BDB_MAJOR_VER)
SERF_FLAG=--with-serf="$(PREFIX)/serf"
MOD_DAV_SVN=modules/svn-$(WC)/mod_dav_svn.so
MOD_AUTHZ_SVN=modules/svn-$(WC)/mod_authz_svn.so
LIBMAGIC_FLAG=--with-libmagic=$(PREFIX)/libmagic
+JAVAHL_CHECK_TARGET=check-all-javahl
endif
ifeq ($(ENABLE_JAVA_BINDINGS),yes)
JAVAHL_FLAG=--enable-javahl=yes --with-jdk --with-jikes=no \
- --with-junit=$(PWD)/junit.jar
+ --with-junit=$(DISTDIR)/$(JUNIT_DIST)
else
- JAVAHL_FLAG=--enable-javahl=no
+ JAVAHL_FLAG=--with-jdk=no
endif
ifdef PROFILE
@@ -1050,26 +1200,20 @@ SVN_WITH_SASL=--with-sasl="$(PREFIX)/cyrus-sasl"
endif
# configure svn
-$(SVN_OBJDIR)/.configured: $(SVN_OBJDIR)/.retrieved
- @if [ $(ENABLE_JAVA_BINDINGS) = yes ]; then \
- if [ ! -e $(PWD)/junit.jar ]; then \
- echo "Please provide $(PWD)/junit.jar"; \
- exit 1; \
- fi; \
- fi
+$(SVN_OBJDIR)/.configured: $(SVN_OBJDIR)/.retrieved $(DISTDIR)/$(JUNIT_DIST)
cd $(SVN_SRCDIR) && ./autogen.sh
cd $(svn_builddir) && \
env LDFLAGS="-L$(PREFIX)/neon/lib -L$(PREFIX)/apr/lib" \
- LD_LIBRARY_PATH="$(PREFIX)/bdb/lib:$(PREFIX)/iconv/lib:$$LD_LIBRARY_PATH" \
+ LD_LIBRARY_PATH="$(LD_LIBRARY_PATH):$$LD_LIBRARY_PATH" \
GREP="`which grep`" \
- PATH=$(PREFIX)/ruby/bin:$$PATH \
+ PATH=$(PREFIX)/ruby/bin:$(PREFIX)/python/bin:$$PATH \
$(SVN_SRCDIR)/configure \
--enable-maintainer-mode \
--with-ssl \
--prefix="$(SVN_PREFIX)" \
--with-apr="$(PREFIX)/apr" \
--with-apr-util="$(PREFIX)/apr" \
- --with-neon="$(PREFIX)/neon" \
+ $(NEON_FLAG) \
$(SVN_WITH_HTTPD) \
$(SVN_WITH_SASL) \
$(SERF_FLAG) \
@@ -1095,16 +1239,27 @@ $(SVN_OBJDIR)/.installed: $(SVN_OBJDIR)/.compiled
&& make install
touch $@
-$(SVN_OBJDIR)/.bindings-compiled: $(SVN_OBJDIR)/.installed
- if [ $(ENABLE_PYTHON_BINDINGS) = yes ]; then \
- cd $(svn_builddir) \
- && make swig-py; \
- fi
+# SWIG 1.x and 2.x are not compatible. If SWIG 2.x is used to generated .swg
+# files and 1.x is used to build the bindings, the Python bindings fail to
+# load with errors such as "undefined symbol 'SWIG_Python_str_AsChar'".
+# So clean any pre-generated .swg files to make sure everything is done
+# by the same version of SWIG.
+$(SVN_OBJDIR)/.pre-generated-swig-cleaned:
+ -cd $(svn_builddir) \
+ && make extraclean-swig
+ touch $@
+
+$(SVN_OBJDIR)/.bindings-compiled: $(SVN_OBJDIR)/.installed $(SVN_OBJDIR)/.pre-generated-swig-cleaned
+ cd $(svn_builddir) \
+ && env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
+ make swig-py
cd $(svn_builddir) && \
- PATH=$(PREFIX)/ruby/bin:$$PATH make swig-rb
+ env PATH=$(PREFIX)/ruby/bin:$$PATH \
+ LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) make swig-rb
if [ $(ENABLE_PERL_BINDINGS) = yes ]; then \
cd $(svn_builddir) \
- && make swig-pl; \
+ && env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
+ make swig-pl; \
fi
if [ $(ENABLE_JAVA_BINDINGS) = yes ]; then \
cd $(svn_builddir) \
@@ -1113,12 +1268,12 @@ $(SVN_OBJDIR)/.bindings-compiled: $(SVN_OBJDIR)/.installed
touch $@
$(SVN_OBJDIR)/.bindings-installed: $(SVN_OBJDIR)/.bindings-compiled
- if [ $(ENABLE_PYTHON_BINDINGS) = yes ]; then \
- cd $(svn_builddir) \
- && make install-swig-py; \
- fi
+ cd $(svn_builddir) \
+ && env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
+ make install-swig-py
cd $(svn_builddir) && \
- PATH=$(PREFIX)/ruby/bin:$$PATH make install-swig-rb
+ env PATH=$(PREFIX)/ruby/bin:$$PATH \
+ LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) make install-swig-rb
if [ $(ENABLE_PERL_BINDINGS) = yes ]; then \
cd $(svn_builddir) \
&& make install-swig-pl-lib; \
@@ -1203,22 +1358,28 @@ endif
echo >>$@.tmp 'RedirectMatch ^/svn-test-work/repositories/REDIRECT-TEMP-(.*)$$ /svn-test-work/repositories/$$1'
mv -f $@.tmp $@
-# We need this to make sure some targets below pick up the right libraries
-LD_LIBRARY_PATH=$(PREFIX)/apr/lib:$(PREFIX)/iconv/lib:$(PREFIX)/bdb/lib:$(PREFIX)/neon/lib:$(PREFIX)/serf/lib:$(PREFIX)/sqlite/lib:$(PREFIX)/cyrus-sasl/lib:$(PREFIX)/iconv/lib:$(PREFIX)/libmagic/lib:$(PREFIX)/ruby/lib:$(PREFIX)/svn-$(WC)/lib
-
.PHONY: libpath
libpath:
- @echo export LD_LIBRARY_PATH=$(LD_LIBRARY_PATH):$$LD_LIBRARY_PATH
+ @echo export LD_LIBRARY_PATH="$(LD_LIBRARY_PATH):$$LD_LIBRARY_PATH" \
+ "PYTHONPATH=$(SVN_PREFIX)/lib/svn-python"
+#
+# OpenBSD requires an LD_PRELOAD hack to dlopen() libraries linked to
+# libpthread (e.g. libsvn_auth_gnome_keyring.so) into executables that
+# aren't linked to libpthread.
+ifeq ($(UNAME),OpenBSD)
+LIB_PTHREAD_HACK=LD_PRELOAD=libpthread.so
+endif
.PHONY: start-svnserve stop-svnserve start-httpd stop-httpd
-HTTPD_CMD = env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
+HTTPD_CMD = env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) $(LIB_PTHREAD_HACK) \
$(PREFIX)/httpd/bin/apachectl -f $(HTTPD_CHECK_CONF)
HTTPD_START_CMD = $(HTTPD_CMD) -k start
HTTPD_START_CMD_DEBUG = $(HTTPD_START_CMD) -X
HTTPD_STOP_CMD = $(HTTPD_CMD) -k stop; sleep 3
-SVNSERVE_START_CMD = (ls $(PWD)/svnserve-*.pid | while read pidfile; do \
+SVNSERVE_START_CMD = (test -e $(PWD)/svnserve-*.pid && \
+ ls $(PWD)/svnserve-*.pid | while read pidfile; do \
kill `cat "$$pidfile"`; sleep 3; \
rm -f $$pidfile; \
done); \
@@ -1259,7 +1420,8 @@ define do_check
-cd $(svn_builddir) && for fs in fsfs bdb; do \
echo "Begin test: $(subst svn-check-,,$@) x $$fs"; \
test -d "$(RAMDISK)/tmp" && export TMPDIR="$(RAMDISK)/tmp"; \
- make check PARALLEL=$(PARALLEL) CLEANUP=$(CLEANUP) $1 FS_TYPE=$$fs; \
+ env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) $(LIB_PTHREAD_HACK) \
+ make check PARALLEL=$(PARALLEL) CLEANUP=$(CLEANUP) $1 FS_TYPE=$$fs; \
for log in tests.log fails.log; do \
test -f $$log && mv -f $$log $$log.$@-$$fs; \
done; \
@@ -1277,10 +1439,16 @@ svn-check-prepare-ramdisk:
mkdir -p "$(RAMDISK)/tmp"; \
fi
+ifndef NEON_FLAG
+svn-check-neon:
+ @echo Neon is not supported by this build of Subversion, skipping tests
+ @true
+else
svn-check-neon: $(HTTPD_CHECK_CONF) $(SVN_OBJDIR)/.compiled $(SVN_OBJDIR)/.bindings-compiled svn-check-prepare-ramdisk
$(HTTPD_START_CMD)
$(call do_check,BASE_URL=http://localhost:$(HTTPD_CHECK_PORT) HTTP_LIBRARY=neon)
$(HTTPD_STOP_CMD)
+endif
svn-check-serf: $(HTTPD_CHECK_CONF) $(SVN_OBJDIR)/.compiled $(SVN_OBJDIR)/.bindings-compiled svn-check-prepare-ramdisk
$(HTTPD_START_CMD)
@@ -1308,34 +1476,33 @@ svn-check-swig-pl:
-if [ $(ENABLE_PERL_BINDINGS) = yes ]; then \
(cd $(svn_builddir) && \
env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
+ $(LIB_PTHREAD_HACK) \
make check-swig-pl 2>&1) | \
tee $(svn_builddir)/tests.log.bindings.pl; \
fi
svn-check-swig-py:
- -if [ $(ENABLE_PYTHON_BINDINGS) = yes ]; then \
- (cd $(svn_builddir) && \
- env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
- make check-swig-py 2>&1) | \
- tee $(svn_builddir)/tests.log.bindings.py; \
- fi
+ -(cd $(svn_builddir) && \
+ env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
+ make check-swig-py 2>&1) | \
+ tee $(svn_builddir)/tests.log.bindings.py
# We add the svn prefix to PATH here because the ruby tests
# attempt to start an svnserve binary found in PATH.
svn-check-swig-rb:
- (cd $(svn_builddir)/subversion/bindings/swig/ruby/test && \
+ (cd $(svn_builddir) && \
env RUBYLIB=$(RUBYLIB) \
LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
PATH=$(SVN_PREFIX)/bin:$$PATH \
- $(PREFIX)/ruby/bin/ruby run-test.rb \
- --verbose=verbose 2>&1) | \
+ $(LIB_PTHREAD_HACK) \
+ make check-swig-rb 2>&1) | \
tee $(svn_builddir)/tests.log.bindings.rb
svn-check-javahl:
-if [ $(ENABLE_JAVA_BINDINGS) = yes ]; then \
(cd $(svn_builddir) && \
env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
- make check-javahl 2>&1) | \
+ make $(JAVAHL_CHECK_TARGET) 2>&1) | \
tee $(svn_builddir)/tests.log.bindings.javahl; \
fi
@@ -1343,13 +1510,17 @@ svn-check: svn-check-prepare-ramdisk svn-check-local svn-check-svn \
svn-check-neon svn-check-serf svn-check-bindings
.PHONY: sign-email
+ifdef NEON_FLAG
+NEON_STR=ra_neon |
+NEON_VER_LINE=@echo "neon: $(NEON_VER)"
+endif
sign-email:
@echo "Summary: +1 to release"
@echo ""
- @echo "Tested: [bdb | fsfs] x [ra_local | ra_svn | ra_neon | ra_serf]"
+ @echo "Tested: [bdb | fsfs] x [ra_local | ra_svn | $(NEON_STR)ra_serf]"
@echo " swig bindings"
ifeq ($(ENABLE_JAVA_BINDINGS),yes)
- @echo " javahl bindings"
+ @echo " javahl bindings"
endif
@echo ""
@echo "Test results: All passed."
@@ -1366,13 +1537,13 @@ endif
@echo "apr: $(APR_VER)"
@echo "apr-util: $(APR_UTIL_VER)"
@echo "httpd: $(HTTPD_VER)"
- @echo "neon: $(NEON_VER)"
+ $(NEON_VER_LINE)
@echo "serf: $(SERF_VER)"
@echo "cyrus-sasl: $(CYRUS_SASL_VER)"
@echo "sqlite: $(SQLITE_VER)"
@echo "openssl: `openssl version | cut -d' ' -f2`"
@echo "swig: `swig -version | grep Version | cut -d' ' -f3`"
- @echo "python: `python --version 2>&1 | cut -d' ' -f2`"
+ @echo "python: $(PYTHON_VER)"
@echo "perl: `eval \`perl -V:version\`; echo $$version`"
@echo "ruby: $(RUBY_VER)"
ifeq ($(ENABLE_JAVA_BINDINGS),yes)
@@ -1380,3 +1551,9 @@ ifeq ($(ENABLE_JAVA_BINDINGS),yes)
endif
@echo ""
@echo "Signatures:"
+ @echo
+ @echo "subversion-$(TAG).tar.gz"
+ @echo "`cat subversion-$(TAG).tar.gz.asc`"
+ @echo
+ @echo "subversion-$(TAG).tar.bz2"
+ @echo "`cat subversion-$(TAG).tar.bz2.asc`"
diff --git a/tools/dev/unix-build/README b/tools/dev/unix-build/README
index 6110d8e..13cdc42 100644
--- a/tools/dev/unix-build/README
+++ b/tools/dev/unix-build/README
@@ -22,12 +22,24 @@ repository.
Usage
=====
-First, choose a directory $(SVN_DEV) to set up the environment. Note
-that this directory cannot be changed later because the script
-hardcodes build and link paths relative to the current working
-directory.
+First, choose a directory $(SVN_DEV) to set up the environment.
+For example, $(SVN_DEV) could be the directory "~/svn".
+Note that this directory cannot be changed later because the script
+hardcodes build and link paths relative to the current working directory.
+
+ $ mkdir $(SVN_DEV)
+
+Now change into this directory and make the Makefile available in it:
+
+ $ cd $(SVN_DEV)
+ $ svn checkout https://svn.apache.org/repos/asf/subversion/trunk/tools/dev/unix-build
+ $ ln -s unix-build/Makefile.svn Makefile
+
+To fetch and build trunk, simply don't pass anything, just run 'make':
+
+ $ cd $(SVN_DEV)
+ $ make
-To fetch and build trunk, simply don't pass anything.
Pass the branch you want to build in BRANCH, e.g.
$ make BRANCH="1.5.x"
You can also pass a tag to build:
@@ -39,19 +51,37 @@ than one working copy of the same branch:
When the script has finished fetching and building, it uses
$(SVN_DEV)/prefix to install Subversion libraries and
binaries. $(SVN_DEV)/prefix/svn-trunk (or whatever you choose to
-build) will contain the latest Subversion binaries: you should add
-$(SVN_DEV)/prefix/svn-trunk/bin to your $PATH to use them. The
-Makefile in $(SVN_DEV)/svn-trunk is configured to build with sane
+build) will contain the latest Subversion binaries. You can add
+$(SVN_DEV)/prefix/svn-trunk/bin to your $PATH to use them:
+
+ $ export PATH="$(SVN_DEV)/prefix/svn-trunk/bin:$PATH"
+
+The Makefile in $(SVN_DEV)/svn-trunk is configured to build with sane
options: while developing Subversion, simply `svn up` to pull the
latest changes, `make` and `make install` to install the binaries in
-$(SVN_DEV)/prefix/svn-trunk.
-
-If at any point, you want to re-configure any of the packages to the
-default configuration in Makefile.svn, just run the "<PACKAGE>-reset"
-target in Makefile.svn before trying to rebuild again. If, in the
-extreme case, you want to remove everything including the installed
-binaries effectively returning to the starting point, use the "nuke"
-target.
+$(SVN_DEV)/prefix/svn-trunk. This usually works fine. If not, you may
+need to use the 'svn-reset' target and recompile everything.
+
+If at any point, you want to recompile any of the packages with the
+default configuration in Makefile.svn, use the *-clean and *-reset
+target in Makefile.svn before trying to rebuild again. For example:
+
+ $ make svn-clean
+ $ make svn-reset
+ $ make
+
+Or, if you want to recompile svn and all dependencies:
+
+ $ make clean
+ $ make reset
+ $ make
+
+If you want to remove everything including the installed binaries effectively
+returning to the starting point, use the "nuke" target (BE CAREFUL, this will
+remove the 'svn' binary compiled from trunk which you might need to manage
+existing working copies):
+
+ $ make nuke
Extended usage
==============
diff --git a/tools/dev/wc-format.py b/tools/dev/wc-format.py
index 158f529..fc6ef07 100755
--- a/tools/dev/wc-format.py
+++ b/tools/dev/wc-format.py
@@ -51,7 +51,7 @@ def print_format(wc_path):
# 1.4.x: format 8
# 1.5.x: format 9
# 1.6.x: format 10
- # 1.7.x: format XXX
+ # 1.7.x: format 29
formatno = get_format(wc_path)
print '%s: %s' % (wc_path, formatno)
diff --git a/tools/dev/which-error.py b/tools/dev/which-error.py
index 55abba7..dc6a8f5 100755
--- a/tools/dev/which-error.py
+++ b/tools/dev/which-error.py
@@ -23,12 +23,13 @@
# under the License.
# ====================================================================
#
-# $HeadURL: http://svn.apache.org/repos/asf/subversion/branches/1.7.x/tools/dev/which-error.py $
-# $LastChangedDate: 2011-07-08 13:53:27 +0000 (Fri, 08 Jul 2011) $
-# $LastChangedBy: philip $
-# $LastChangedRevision: 1144315 $
+# $HeadURL: http://svn.apache.org/repos/asf/subversion/branches/1.8.x/tools/dev/which-error.py $
+# $LastChangedDate: 2012-03-30 20:29:32 +0000 (Fri, 30 Mar 2012) $
+# $LastChangedBy: danielsh $
+# $LastChangedRevision: 1307598 $
#
+import errno
import sys
import os.path
import re
@@ -68,6 +69,13 @@ codes. This can be done in variety of ways:
def get_errors():
errs = {}
+ ## errno values.
+ errs.update(errno.errorcode)
+ ## APR-defined errors, from apr_errno.h.
+ for line in open(os.path.join(os.path.dirname(sys.argv[0]), 'aprerr.txt')):
+ key, _, val = line.split()
+ errs[int(val)] = key
+ ## Subversion errors, from svn_error_codes.h.
for key in vars(core):
if key.find('SVN_ERR_') == 0:
try:
@@ -81,7 +89,10 @@ def print_error(code):
try:
print('%08d %s' % (code, __svn_error_codes[code]))
except KeyError:
- print('%08d *** UNKNOWN ERROR CODE ***' % (code))
+ if code == -41:
+ print("Sit by a lake.")
+ else:
+ print('%08d *** UNKNOWN ERROR CODE ***' % (code))
if __name__ == "__main__":
global __svn_error_codes
diff --git a/tools/dev/windows-build/Makefile b/tools/dev/windows-build/Makefile
index fbf7b22..c0e1b15 100644
--- a/tools/dev/windows-build/Makefile
+++ b/tools/dev/windows-build/Makefile
@@ -31,7 +31,7 @@ INSTALLDIR=E:\svn
EXPATVER=2.0.0
HTTPDVER=2.2.13
-NEONVER=0.28.2
+SERFVER=1.1.0
OPENSSLVER=0.9.8k
SQLITEVER=3.6.3
ZLIBVER=1.2.3
@@ -40,7 +40,6 @@ ZLIBVER=1.2.3
PATCHESDIR=$(HOME)\mydiffs\svn
OPENSSLDIR=$(SOURCESDIR)\openssl-$(OPENSSLVER)
EXPATDIR=$(SOURCESDIR)\expat-$(EXPATVER)
-NEONDIR=$(SOURCESDIR)\neon-$(NEONVER)
HTTPDDIR=$(SOURCESDIR)\httpd-$(HTTPDVER)
#APRDIR=$(SOURCESDIR)\apr
#APRUTILDIR=$(SOURCESDIR)\apr-util
@@ -50,7 +49,7 @@ APRUTILDIR=$(HTTPDDIR)\srclib\apr-util
APRICONVDIR=$(HTTPDDIR)\srclib\apr-iconv
SQLITEDIR=$(SOURCESDIR)\sqlite-amalgamation
ZLIBDIR=$(SOURCESDIR)\zlib-$(ZLIBVER)
-SERFDIR=$(SOURCESDIR)\serf
+SERFDIR=$(SOURCESDIR)\serf-$(SERFVER)
all:
@echo Available targets: newfiles versionstamp
@@ -101,13 +100,13 @@ targetdir: TARGETDIRset
# TODO: pass --with-apr-* if you don't have httpd; make --with-* args optional
config: targetdir
- python gen-make.py --$(CONFIG) --with-httpd=$(HTTPDDIR) --with-neon=$(NEONDIR) --with-serf=$(SERFDIR) --with-openssl=$(OPENSSLDIR) --with-sqlite=$(SQLITEDIR) --with-zlib=$(ZLIBDIR) $(ENABLE_ML) --vsnet-version=2008 -t vcproj 2>&1 | tee log.gen-make
+ python gen-make.py --$(CONFIG) --with-httpd=$(HTTPDDIR) --with-serf=$(SERFDIR) --with-openssl=$(OPENSSLDIR) --with-sqlite=$(SQLITEDIR) --with-zlib=$(ZLIBDIR) $(ENABLE_ML) --vsnet-version=2008 -t vcproj 2>&1 | tee log.gen-make
# Visual Studio 2008
-libsvn_auth_gnome_keyring libsvn_auth_kwallet libsvn_client libsvn_delta libsvn_diff libsvn_fs libsvn_fs_base libsvn_fs_fs libsvn_fs_util libsvn_ra libsvn_ra_local libsvn_ra_neon libsvn_ra_serf libsvn_ra_svn libsvn_repos libsvn_subr libsvn_wc: targetdir
+libsvn_auth_gnome_keyring libsvn_auth_kwallet libsvn_client libsvn_delta libsvn_diff libsvn_fs libsvn_fs_base libsvn_fs_fs libsvn_fs_util libsvn_ra libsvn_ra_local libsvn_ra_serf libsvn_ra_svn libsvn_repos libsvn_subr libsvn_wc: targetdir
$(MSBUILD) /t:Libraries\$@
$(MAKE) package
-svn svnadmin svndumpfilter svnlook svnmucc svnserve svnsync svnversion entries-dump: targetdir
+svn svnadmin svndumpfilter svnlook svnmucc svnserve svnsync svnversion svnrdump entries-dump: targetdir
$(MSBUILD) /t:Programs\$@
$(MAKE) package
auth-test cache-test changes-test checksum-test client-test compat-test config-test db-test diff-diff3-test dir-delta-editor dirent_uri-test error-test fs-base-test fs-pack-test fs-test hashdump-test key-test locks-test mergeinfo-test opt-test path-test ra-local-test random-test repos-test revision-test skel-test stream-test string-test strings-reps-test svn_test_fs svn_test_main svndiff-test target-test time-test translate-test tree-conflict-data-test utf-test vdelta-test window-test: targetdir
@@ -128,7 +127,7 @@ all2: targetdir
package:
test -d $(SVNDIR)\$(CONFIG)\Subversion\tests\cmdline || mkdir $(SVNDIR)\$(CONFIG)\Subversion\tests\cmdline
test -d $(TARGETDIR)\bin || mkdir $(TARGETDIR)\bin
- for %%i in (svn svnadmin svndumpfilter svnlook svnserve svnsync svnversion) do @$(CP) $(CONFIG)\subversion\%%i\%%i.exe $(TARGETDIR)\bin
+ for %%i in (svn svnadmin svndumpfilter svnlook svnserve svnsync svnversion svnrdump svnmucc) do @$(CP) $(CONFIG)\subversion\%%i\%%i.exe $(TARGETDIR)\bin
for %%i in (diff diff3 diff4) do @if exist $(CONFIG)\tools\diff\%%i.exe $(CP) $(CONFIG)\tools\diff\%%i.exe $(TARGETDIR)\bin
$(CP) $(APRDIR)\$(CONFIG)/*.dll $(TARGETDIR)\bin
$(CP) $(APRUTILDIR)\$(CONFIG)/*.dll $(TARGETDIR)\bin
diff --git a/tools/dev/windows-build/README b/tools/dev/windows-build/README
index a4d37d8..cd05cd8 100644
--- a/tools/dev/windows-build/README
+++ b/tools/dev/windows-build/README
@@ -1,5 +1,9 @@
Makefiles for automating the Windows build.
+Should work either either nmake or GNU make.
+
+Doesn't require Cygwin.
+
* TODO:
- document: how to use
- known bugs/shortcomings
@@ -11,3 +15,8 @@ Makefiles for automating the Windows build.
See: http://svn.haxx.se/users/archive-2009-07/0764.shtml
(Message-Id: <alpine.561.2.00.0907241718550.6824@daniel2.local>)
+
+
+'cp' and friends can be obtained from gnuwin32.sf.net, unxutils.sf.net,
+cygwin, etc. Or tweak the makefile to use cp.pl or the built-in 'copy'
+command instead.:
diff --git a/tools/diff/diff.c b/tools/diff/diff.c
index d681381..c4b5e9d 100644
--- a/tools/diff/diff.c
+++ b/tools/diff/diff.c
@@ -89,6 +89,8 @@ int main(int argc, const char *argv[])
options_array = apr_array_make(pool, 0, sizeof(const char *));
+ diff_options = svn_diff_file_options_create(pool);
+
for (i = 1 ; i < argc ; i++)
{
if (!no_more_options && (argv[i][0] == '-'))
@@ -105,6 +107,11 @@ int main(int argc, const char *argv[])
show_c_function = TRUE;
continue;
}
+ if (argv[i][1] == 'w' && !argv[i][2])
+ {
+ diff_options->ignore_space = svn_diff_file_ignore_space_all;
+ continue;
+ }
APR_ARRAY_PUSH(options_array, const char *) = argv[i];
}
else
@@ -127,8 +134,6 @@ int main(int argc, const char *argv[])
return 2;
}
- diff_options = svn_diff_file_options_create(pool);
-
svn_err = svn_diff_file_options_parse(diff_options, options_array, pool);
if (svn_err)
{
diff --git a/tools/dist/_gnupg.py b/tools/dist/_gnupg.py
new file mode 100644
index 0000000..5c6dd1c
--- /dev/null
+++ b/tools/dist/_gnupg.py
@@ -0,0 +1,1035 @@
+# Copyright (c) 2008-2011 by Vinay Sajip.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * The name(s) of the copyright holder(s) may not be used to endorse or
+# promote products derived from this software without specific prior
+# written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) "AS IS" AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+""" A wrapper for the 'gpg' command::
+
+Portions of this module are derived from A.M. Kuchling's well-designed
+GPG.py, using Richard Jones' updated version 1.3, which can be found
+in the pycrypto CVS repository on Sourceforge:
+
+http://pycrypto.cvs.sourceforge.net/viewvc/pycrypto/gpg/GPG.py
+
+This module is *not* forward-compatible with amk's; some of the
+old interface has changed. For instance, since I've added decrypt
+functionality, I elected to initialize with a 'gnupghome' argument
+instead of 'keyring', so that gpg can find both the public and secret
+keyrings. I've also altered some of the returned objects in order for
+the caller to not have to know as much about the internals of the
+result classes.
+
+While the rest of ISconf is released under the GPL, I am releasing
+this single file under the same terms that A.M. Kuchling used for
+pycrypto.
+
+Steve Traugott, stevegt@terraluna.org
+Thu Jun 23 21:27:20 PDT 2005
+
+This version of the module has been modified from Steve Traugott's version
+(see http://trac.t7a.org/isconf/browser/trunk/lib/python/isconf/GPG.py) by
+Vinay Sajip to make use of the subprocess module (Steve's version uses os.fork()
+and so does not work on Windows). Renamed to gnupg.py to avoid confusion with
+the previous versions.
+
+Modifications Copyright (C) 2008-2011 Vinay Sajip. All rights reserved.
+
+A unittest harness (test_gnupg.py) has also been added.
+"""
+import locale
+
+__author__ = "Vinay Sajip"
+__date__ = "$02-Sep-2011 13:18:12$"
+
+try:
+ from io import StringIO
+except ImportError:
+ from cStringIO import StringIO
+
+import codecs
+import locale
+import logging
+import os
+import socket
+from subprocess import Popen
+from subprocess import PIPE
+import sys
+import threading
+
+try:
+ import logging.NullHandler as NullHandler
+except ImportError:
+ class NullHandler(logging.Handler):
+ def handle(self, record):
+ pass
+try:
+ unicode
+ _py3k = False
+except NameError:
+ _py3k = True
+
+logger = logging.getLogger(__name__)
+if not logger.handlers:
+ logger.addHandler(NullHandler())
+
+def _copy_data(instream, outstream):
+ # Copy one stream to another
+ sent = 0
+ if hasattr(sys.stdin, 'encoding'):
+ enc = sys.stdin.encoding
+ else:
+ enc = 'ascii'
+ while True:
+ data = instream.read(1024)
+ if len(data) == 0:
+ break
+ sent += len(data)
+ logger.debug("sending chunk (%d): %r", sent, data[:256])
+ try:
+ outstream.write(data)
+ except UnicodeError:
+ outstream.write(data.encode(enc))
+ except:
+ # Can sometimes get 'broken pipe' errors even when the data has all
+ # been sent
+ logger.exception('Error sending data')
+ break
+ try:
+ outstream.close()
+ except IOError:
+ logger.warning('Exception occurred while closing: ignored', exc_info=1)
+ logger.debug("closed output, %d bytes sent", sent)
+
+def _threaded_copy_data(instream, outstream):
+ wr = threading.Thread(target=_copy_data, args=(instream, outstream))
+ wr.setDaemon(True)
+ logger.debug('data copier: %r, %r, %r', wr, instream, outstream)
+ wr.start()
+ return wr
+
+def _write_passphrase(stream, passphrase, encoding):
+ passphrase = '%s\n' % passphrase
+ passphrase = passphrase.encode(encoding)
+ stream.write(passphrase)
+ logger.debug("Wrote passphrase: %r", passphrase)
+
+def _is_sequence(instance):
+ return isinstance(instance,list) or isinstance(instance,tuple)
+
+def _make_binary_stream(s, encoding):
+ try:
+ if _py3k:
+ if isinstance(s, str):
+ s = s.encode(encoding)
+ else:
+ if type(s) is not str:
+ s = s.encode(encoding)
+ from io import BytesIO
+ rv = BytesIO(s)
+ except ImportError:
+ rv = StringIO(s)
+ return rv
+
+class Verify(object):
+ "Handle status messages for --verify"
+
+ def __init__(self, gpg):
+ self.gpg = gpg
+ self.valid = False
+ self.fingerprint = self.creation_date = self.timestamp = None
+ self.signature_id = self.key_id = None
+ self.username = None
+
+ def __nonzero__(self):
+ return self.valid
+
+ __bool__ = __nonzero__
+
+ def handle_status(self, key, value):
+ if key in ("TRUST_UNDEFINED", "TRUST_NEVER", "TRUST_MARGINAL",
+ "TRUST_FULLY", "TRUST_ULTIMATE", "RSA_OR_IDEA", "NODATA",
+ "IMPORT_RES", "PLAINTEXT", "PLAINTEXT_LENGTH"):
+ pass
+ elif key == "BADSIG":
+ self.valid = False
+ self.status = 'signature bad'
+ self.key_id, self.username = value.split(None, 1)
+ elif key == "GOODSIG":
+ self.valid = True
+ self.status = 'signature good'
+ self.key_id, self.username = value.split(None, 1)
+ elif key == "VALIDSIG":
+ (self.fingerprint,
+ self.creation_date,
+ self.sig_timestamp,
+ self.expire_timestamp) = value.split()[:4]
+ # may be different if signature is made with a subkey
+ self.pubkey_fingerprint = value.split()[-1]
+ self.status = 'signature valid'
+ elif key == "SIG_ID":
+ (self.signature_id,
+ self.creation_date, self.timestamp) = value.split()
+ elif key == "ERRSIG":
+ self.valid = False
+ (self.key_id,
+ algo, hash_algo,
+ cls,
+ self.timestamp) = value.split()[:5]
+ self.status = 'signature error'
+ elif key == "NO_PUBKEY":
+ self.valid = False
+ self.key_id = value
+ self.status = 'no public key'
+ elif key in ("KEYEXPIRED", "SIGEXPIRED"):
+ # these are useless in verify, since they are spit out for any
+ # pub/subkeys on the key, not just the one doing the signing.
+ # if we want to check for signatures with expired key,
+ # the relevant flag is EXPKEYSIG.
+ pass
+ elif key in ("EXPKEYSIG", "REVKEYSIG"):
+ # signed with expired or revoked key
+ self.valid = False
+ self.key_id = value.split()[0]
+ self.status = (('%s %s') % (key[:3], key[3:])).lower()
+ else:
+ raise ValueError("Unknown status message: %r" % key)
+
+class ImportResult(object):
+ "Handle status messages for --import"
+
+ counts = '''count no_user_id imported imported_rsa unchanged
+ n_uids n_subk n_sigs n_revoc sec_read sec_imported
+ sec_dups not_imported'''.split()
+ def __init__(self, gpg):
+ self.gpg = gpg
+ self.imported = []
+ self.results = []
+ self.fingerprints = []
+ for result in self.counts:
+ setattr(self, result, None)
+
+ def __nonzero__(self):
+ if self.not_imported: return False
+ if not self.fingerprints: return False
+ return True
+
+ __bool__ = __nonzero__
+
+ ok_reason = {
+ '0': 'Not actually changed',
+ '1': 'Entirely new key',
+ '2': 'New user IDs',
+ '4': 'New signatures',
+ '8': 'New subkeys',
+ '16': 'Contains private key',
+ }
+
+ problem_reason = {
+ '0': 'No specific reason given',
+ '1': 'Invalid Certificate',
+ '2': 'Issuer Certificate missing',
+ '3': 'Certificate Chain too long',
+ '4': 'Error storing certificate',
+ }
+
+ def handle_status(self, key, value):
+ if key == "IMPORTED":
+ # this duplicates info we already see in import_ok & import_problem
+ pass
+ elif key == "NODATA":
+ self.results.append({'fingerprint': None,
+ 'problem': '0', 'text': 'No valid data found'})
+ elif key == "IMPORT_OK":
+ reason, fingerprint = value.split()
+ reasons = []
+ for code, text in list(self.ok_reason.items()):
+ if int(reason) | int(code) == int(reason):
+ reasons.append(text)
+ reasontext = '\n'.join(reasons) + "\n"
+ self.results.append({'fingerprint': fingerprint,
+ 'ok': reason, 'text': reasontext})
+ self.fingerprints.append(fingerprint)
+ elif key == "IMPORT_PROBLEM":
+ try:
+ reason, fingerprint = value.split()
+ except:
+ reason = value
+ fingerprint = '<unknown>'
+ self.results.append({'fingerprint': fingerprint,
+ 'problem': reason, 'text': self.problem_reason[reason]})
+ elif key == "IMPORT_RES":
+ import_res = value.split()
+ for i in range(len(self.counts)):
+ setattr(self, self.counts[i], int(import_res[i]))
+ elif key == "KEYEXPIRED":
+ self.results.append({'fingerprint': None,
+ 'problem': '0', 'text': 'Key expired'})
+ elif key == "SIGEXPIRED":
+ self.results.append({'fingerprint': None,
+ 'problem': '0', 'text': 'Signature expired'})
+ else:
+ raise ValueError("Unknown status message: %r" % key)
+
+ def summary(self):
+ l = []
+ l.append('%d imported'%self.imported)
+ if self.not_imported:
+ l.append('%d not imported'%self.not_imported)
+ return ', '.join(l)
+
+class ListKeys(list):
+ ''' Handle status messages for --list-keys.
+
+ Handle pub and uid (relating the latter to the former).
+
+ Don't care about (info from src/DETAILS):
+
+ crt = X.509 certificate
+ crs = X.509 certificate and private key available
+ sub = subkey (secondary key)
+ ssb = secret subkey (secondary key)
+ uat = user attribute (same as user id except for field 10).
+ sig = signature
+ rev = revocation signature
+ pkd = public key data (special field format, see below)
+ grp = reserved for gpgsm
+ rvk = revocation key
+ '''
+ def __init__(self, gpg):
+ self.gpg = gpg
+ self.curkey = None
+ self.fingerprints = []
+ self.uids = []
+
+ def key(self, args):
+ vars = ("""
+ type trust length algo keyid date expires dummy ownertrust uid
+ """).split()
+ self.curkey = {}
+ for i in range(len(vars)):
+ self.curkey[vars[i]] = args[i]
+ self.curkey['uids'] = []
+ if self.curkey['uid']:
+ self.curkey['uids'].append(self.curkey['uid'])
+ del self.curkey['uid']
+ self.append(self.curkey)
+
+ pub = sec = key
+
+ def fpr(self, args):
+ self.curkey['fingerprint'] = args[9]
+ self.fingerprints.append(args[9])
+
+ def uid(self, args):
+ self.curkey['uids'].append(args[9])
+ self.uids.append(args[9])
+
+ def handle_status(self, key, value):
+ pass
+
+class Crypt(Verify):
+ "Handle status messages for --encrypt and --decrypt"
+ def __init__(self, gpg):
+ Verify.__init__(self, gpg)
+ self.data = ''
+ self.ok = False
+ self.status = ''
+
+ def __nonzero__(self):
+ if self.ok: return True
+ return False
+
+ __bool__ = __nonzero__
+
+ def __str__(self):
+ return self.data.decode(self.gpg.encoding, self.gpg.decode_errors)
+
+ def handle_status(self, key, value):
+ if key in ("ENC_TO", "USERID_HINT", "GOODMDC", "END_DECRYPTION",
+ "BEGIN_SIGNING", "NO_SECKEY", "ERROR", "NODATA"):
+ # in the case of ERROR, this is because a more specific error
+ # message will have come first
+ pass
+ elif key in ("NEED_PASSPHRASE", "BAD_PASSPHRASE", "GOOD_PASSPHRASE",
+ "MISSING_PASSPHRASE", "DECRYPTION_FAILED",
+ "KEY_NOT_CREATED"):
+ self.status = key.replace("_", " ").lower()
+ elif key == "NEED_PASSPHRASE_SYM":
+ self.status = 'need symmetric passphrase'
+ elif key == "BEGIN_DECRYPTION":
+ self.status = 'decryption incomplete'
+ elif key == "BEGIN_ENCRYPTION":
+ self.status = 'encryption incomplete'
+ elif key == "DECRYPTION_OKAY":
+ self.status = 'decryption ok'
+ self.ok = True
+ elif key == "END_ENCRYPTION":
+ self.status = 'encryption ok'
+ self.ok = True
+ elif key == "INV_RECP":
+ self.status = 'invalid recipient'
+ elif key == "KEYEXPIRED":
+ self.status = 'key expired'
+ elif key == "SIG_CREATED":
+ self.status = 'sig created'
+ elif key == "SIGEXPIRED":
+ self.status = 'sig expired'
+ else:
+ Verify.handle_status(self, key, value)
+
+class GenKey(object):
+ "Handle status messages for --gen-key"
+ def __init__(self, gpg):
+ self.gpg = gpg
+ self.type = None
+ self.fingerprint = None
+
+ def __nonzero__(self):
+ if self.fingerprint: return True
+ return False
+
+ __bool__ = __nonzero__
+
+ def __str__(self):
+ return self.fingerprint or ''
+
+ def handle_status(self, key, value):
+ if key in ("PROGRESS", "GOOD_PASSPHRASE", "NODATA"):
+ pass
+ elif key == "KEY_CREATED":
+ (self.type,self.fingerprint) = value.split()
+ else:
+ raise ValueError("Unknown status message: %r" % key)
+
+class DeleteResult(object):
+ "Handle status messages for --delete-key and --delete-secret-key"
+ def __init__(self, gpg):
+ self.gpg = gpg
+ self.status = 'ok'
+
+ def __str__(self):
+ return self.status
+
+ problem_reason = {
+ '1': 'No such key',
+ '2': 'Must delete secret key first',
+ '3': 'Ambigious specification',
+ }
+
+ def handle_status(self, key, value):
+ if key == "DELETE_PROBLEM":
+ self.status = self.problem_reason.get(value,
+ "Unknown error: %r" % value)
+ else:
+ raise ValueError("Unknown status message: %r" % key)
+
+class Sign(object):
+ "Handle status messages for --sign"
+ def __init__(self, gpg):
+ self.gpg = gpg
+ self.type = None
+ self.fingerprint = None
+
+ def __nonzero__(self):
+ return self.fingerprint is not None
+
+ __bool__ = __nonzero__
+
+ def __str__(self):
+ return self.data.decode(self.gpg.encoding, self.gpg.decode_errors)
+
+ def handle_status(self, key, value):
+ if key in ("USERID_HINT", "NEED_PASSPHRASE", "BAD_PASSPHRASE",
+ "GOOD_PASSPHRASE", "BEGIN_SIGNING"):
+ pass
+ elif key == "SIG_CREATED":
+ (self.type,
+ algo, hashalgo, cls,
+ self.timestamp, self.fingerprint
+ ) = value.split()
+ else:
+ raise ValueError("Unknown status message: %r" % key)
+
+
+class GPG(object):
+
+ decode_errors = 'strict'
+
+ result_map = {
+ 'crypt': Crypt,
+ 'delete': DeleteResult,
+ 'generate': GenKey,
+ 'import': ImportResult,
+ 'list': ListKeys,
+ 'sign': Sign,
+ 'verify': Verify,
+ }
+
+ "Encapsulate access to the gpg executable"
+ def __init__(self, gpgbinary='gpg', gnupghome=None, verbose=False,
+ use_agent=False, keyring=None):
+ """Initialize a GPG process wrapper. Options are:
+
+ gpgbinary -- full pathname for GPG binary.
+
+ gnupghome -- full pathname to where we can find the public and
+ private keyrings. Default is whatever gpg defaults to.
+ keyring -- name of alternative keyring file to use. If specified,
+ the default keyring is not used.
+ """
+ self.gpgbinary = gpgbinary
+ self.gnupghome = gnupghome
+ self.keyring = keyring
+ self.verbose = verbose
+ self.use_agent = use_agent
+ self.encoding = locale.getpreferredencoding()
+ if self.encoding is None: # This happens on Jython!
+ self.encoding = sys.stdin.encoding
+ if gnupghome and not os.path.isdir(self.gnupghome):
+ os.makedirs(self.gnupghome,0x1C0)
+ p = self._open_subprocess(["--version"])
+ result = self.result_map['verify'](self) # any result will do for this
+ self._collect_output(p, result, stdin=p.stdin)
+ if p.returncode != 0:
+ raise ValueError("Error invoking gpg: %s: %s" % (p.returncode,
+ result.stderr))
+
+ def _open_subprocess(self, args, passphrase=False):
+ # Internal method: open a pipe to a GPG subprocess and return
+ # the file objects for communicating with it.
+ cmd = [self.gpgbinary, '--status-fd 2 --no-tty']
+ if self.gnupghome:
+ cmd.append('--homedir "%s" ' % self.gnupghome)
+ if self.keyring:
+ cmd.append('--no-default-keyring --keyring "%s" ' % self.keyring)
+ if passphrase:
+ cmd.append('--batch --passphrase-fd 0')
+ if self.use_agent:
+ cmd.append('--use-agent')
+ cmd.extend(args)
+ cmd = ' '.join(cmd)
+ if self.verbose:
+ print(cmd)
+ logger.debug("%s", cmd)
+ return Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+
+ def _read_response(self, stream, result):
+ # Internal method: reads all the stderr output from GPG, taking notice
+ # only of lines that begin with the magic [GNUPG:] prefix.
+ #
+ # Calls methods on the response object for each valid token found,
+ # with the arg being the remainder of the status line.
+ lines = []
+ while True:
+ line = stream.readline()
+ if len(line) == 0:
+ break
+ lines.append(line)
+ line = line.rstrip()
+ if self.verbose:
+ print(line)
+ logger.debug("%s", line)
+ if line[0:9] == '[GNUPG:] ':
+ # Chop off the prefix
+ line = line[9:]
+ L = line.split(None, 1)
+ keyword = L[0]
+ if len(L) > 1:
+ value = L[1]
+ else:
+ value = ""
+ result.handle_status(keyword, value)
+ result.stderr = ''.join(lines)
+
+ def _read_data(self, stream, result):
+ # Read the contents of the file from GPG's stdout
+ chunks = []
+ while True:
+ data = stream.read(1024)
+ if len(data) == 0:
+ break
+ logger.debug("chunk: %r" % data[:256])
+ chunks.append(data)
+ if _py3k:
+ # Join using b'' or '', as appropriate
+ result.data = type(data)().join(chunks)
+ else:
+ result.data = ''.join(chunks)
+
+ def _collect_output(self, process, result, writer=None, stdin=None):
+ """
+ Drain the subprocesses output streams, writing the collected output
+ to the result. If a writer thread (writing to the subprocess) is given,
+ make sure it's joined before returning. If a stdin stream is given,
+ close it before returning.
+ """
+ stderr = codecs.getreader(self.encoding)(process.stderr)
+ rr = threading.Thread(target=self._read_response, args=(stderr, result))
+ rr.setDaemon(True)
+ logger.debug('stderr reader: %r', rr)
+ rr.start()
+
+ stdout = process.stdout
+ dr = threading.Thread(target=self._read_data, args=(stdout, result))
+ dr.setDaemon(True)
+ logger.debug('stdout reader: %r', dr)
+ dr.start()
+
+ dr.join()
+ rr.join()
+ if writer is not None:
+ writer.join()
+ process.wait()
+ if stdin is not None:
+ try:
+ stdin.close()
+ except IOError:
+ pass
+ stderr.close()
+ stdout.close()
+
+ def _handle_io(self, args, file, result, passphrase=None, binary=False):
+ "Handle a call to GPG - pass input data, collect output data"
+ # Handle a basic data call - pass data to GPG, handle the output
+ # including status information. Garbage In, Garbage Out :)
+ p = self._open_subprocess(args, passphrase is not None)
+ if not binary:
+ stdin = codecs.getwriter(self.encoding)(p.stdin)
+ else:
+ stdin = p.stdin
+ if passphrase:
+ _write_passphrase(stdin, passphrase, self.encoding)
+ writer = _threaded_copy_data(file, stdin)
+ self._collect_output(p, result, writer, stdin)
+ return result
+
+ #
+ # SIGNATURE METHODS
+ #
+ def sign(self, message, **kwargs):
+ """sign message"""
+ f = _make_binary_stream(message, self.encoding)
+ result = self.sign_file(f, **kwargs)
+ f.close()
+ return result
+
+ def sign_file(self, file, keyid=None, passphrase=None, clearsign=True,
+ detach=False, binary=False):
+ """sign file"""
+ logger.debug("sign_file: %s", file)
+ if binary:
+ args = ['-s']
+ else:
+ args = ['-sa']
+ # You can't specify detach-sign and clearsign together: gpg ignores
+ # the detach-sign in that case.
+ if detach:
+ args.append("--detach-sign")
+ elif clearsign:
+ args.append("--clearsign")
+ if keyid:
+ args.append('--default-key "%s"' % keyid)
+ result = self.result_map['sign'](self)
+ #We could use _handle_io here except for the fact that if the
+ #passphrase is bad, gpg bails and you can't write the message.
+ p = self._open_subprocess(args, passphrase is not None)
+ try:
+ stdin = p.stdin
+ if passphrase:
+ _write_passphrase(stdin, passphrase, self.encoding)
+ writer = _threaded_copy_data(file, stdin)
+ except IOError:
+ logging.exception("error writing message")
+ writer = None
+ self._collect_output(p, result, writer, stdin)
+ return result
+
+ def verify(self, data):
+ """Verify the signature on the contents of the string 'data'
+
+ >>> gpg = GPG(gnupghome="keys")
+ >>> input = gpg.gen_key_input(Passphrase='foo')
+ >>> key = gpg.gen_key(input)
+ >>> assert key
+ >>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='bar')
+ >>> assert not sig
+ >>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='foo')
+ >>> assert sig
+ >>> verify = gpg.verify(sig.data)
+ >>> assert verify
+
+ """
+ f = _make_binary_stream(data, self.encoding)
+ result = self.verify_file(f)
+ f.close()
+ return result
+
+ def verify_file(self, file, data_filename=None):
+ "Verify the signature on the contents of the file-like object 'file'"
+ logger.debug('verify_file: %r, %r', file, data_filename)
+ result = self.result_map['verify'](self)
+ args = ['--verify']
+ if data_filename is None:
+ self._handle_io(args, file, result, binary=True)
+ else:
+ logger.debug('Handling detached verification')
+ import tempfile
+ fd, fn = tempfile.mkstemp(prefix='pygpg')
+ s = file.read()
+ file.close()
+ logger.debug('Wrote to temp file: %r', s)
+ os.write(fd, s)
+ os.close(fd)
+ args.append(fn)
+ args.append('"%s"' % data_filename)
+ try:
+ p = self._open_subprocess(args)
+ self._collect_output(p, result, stdin=p.stdin)
+ finally:
+ os.unlink(fn)
+ return result
+
+ #
+ # KEY MANAGEMENT
+ #
+
+ def import_keys(self, key_data):
+ """ import the key_data into our keyring
+
+ >>> import shutil
+ >>> shutil.rmtree("keys")
+ >>> gpg = GPG(gnupghome="keys")
+ >>> input = gpg.gen_key_input()
+ >>> result = gpg.gen_key(input)
+ >>> print1 = result.fingerprint
+ >>> result = gpg.gen_key(input)
+ >>> print2 = result.fingerprint
+ >>> pubkey1 = gpg.export_keys(print1)
+ >>> seckey1 = gpg.export_keys(print1,secret=True)
+ >>> seckeys = gpg.list_keys(secret=True)
+ >>> pubkeys = gpg.list_keys()
+ >>> assert print1 in seckeys.fingerprints
+ >>> assert print1 in pubkeys.fingerprints
+ >>> str(gpg.delete_keys(print1))
+ 'Must delete secret key first'
+ >>> str(gpg.delete_keys(print1,secret=True))
+ 'ok'
+ >>> str(gpg.delete_keys(print1))
+ 'ok'
+ >>> str(gpg.delete_keys("nosuchkey"))
+ 'No such key'
+ >>> seckeys = gpg.list_keys(secret=True)
+ >>> pubkeys = gpg.list_keys()
+ >>> assert not print1 in seckeys.fingerprints
+ >>> assert not print1 in pubkeys.fingerprints
+ >>> result = gpg.import_keys('foo')
+ >>> assert not result
+ >>> result = gpg.import_keys(pubkey1)
+ >>> pubkeys = gpg.list_keys()
+ >>> seckeys = gpg.list_keys(secret=True)
+ >>> assert not print1 in seckeys.fingerprints
+ >>> assert print1 in pubkeys.fingerprints
+ >>> result = gpg.import_keys(seckey1)
+ >>> assert result
+ >>> seckeys = gpg.list_keys(secret=True)
+ >>> pubkeys = gpg.list_keys()
+ >>> assert print1 in seckeys.fingerprints
+ >>> assert print1 in pubkeys.fingerprints
+ >>> assert print2 in pubkeys.fingerprints
+
+ """
+ result = self.result_map['import'](self)
+ logger.debug('import_keys: %r', key_data[:256])
+ data = _make_binary_stream(key_data, self.encoding)
+ self._handle_io(['--import'], data, result, binary=True)
+ logger.debug('import_keys result: %r', result.__dict__)
+ data.close()
+ return result
+
+ def recv_keys(self, keyserver, *keyids):
+ """Import a key from a keyserver
+
+ >>> import shutil
+ >>> shutil.rmtree("keys")
+ >>> gpg = GPG(gnupghome="keys")
+ >>> result = gpg.recv_keys('pgp.mit.edu', '3FF0DB166A7476EA')
+ >>> assert result
+
+ """
+ result = self.result_map['import'](self)
+ logger.debug('recv_keys: %r', keyids)
+ data = _make_binary_stream("", self.encoding)
+ #data = ""
+ args = ['--keyserver', keyserver, '--recv-keys']
+ args.extend(keyids)
+ self._handle_io(args, data, result, binary=True)
+ logger.debug('recv_keys result: %r', result.__dict__)
+ data.close()
+ return result
+
+ def delete_keys(self, fingerprints, secret=False):
+ which='key'
+ if secret:
+ which='secret-key'
+ if _is_sequence(fingerprints):
+ fingerprints = ' '.join(fingerprints)
+ args = ['--batch --delete-%s "%s"' % (which, fingerprints)]
+ result = self.result_map['delete'](self)
+ p = self._open_subprocess(args)
+ self._collect_output(p, result, stdin=p.stdin)
+ return result
+
+ def export_keys(self, keyids, secret=False):
+ "export the indicated keys. 'keyid' is anything gpg accepts"
+ which=''
+ if secret:
+ which='-secret-key'
+ if _is_sequence(keyids):
+ keyids = ' '.join(['"%s"' % k for k in keyids])
+ args = ["--armor --export%s %s" % (which, keyids)]
+ p = self._open_subprocess(args)
+ # gpg --export produces no status-fd output; stdout will be
+ # empty in case of failure
+ #stdout, stderr = p.communicate()
+ result = self.result_map['delete'](self) # any result will do
+ self._collect_output(p, result, stdin=p.stdin)
+ logger.debug('export_keys result: %r', result.data)
+ return result.data.decode(self.encoding, self.decode_errors)
+
+ def list_keys(self, secret=False):
+ """ list the keys currently in the keyring
+
+ >>> import shutil
+ >>> shutil.rmtree("keys")
+ >>> gpg = GPG(gnupghome="keys")
+ >>> input = gpg.gen_key_input()
+ >>> result = gpg.gen_key(input)
+ >>> print1 = result.fingerprint
+ >>> result = gpg.gen_key(input)
+ >>> print2 = result.fingerprint
+ >>> pubkeys = gpg.list_keys()
+ >>> assert print1 in pubkeys.fingerprints
+ >>> assert print2 in pubkeys.fingerprints
+
+ """
+
+ which='keys'
+ if secret:
+ which='secret-keys'
+ args = "--list-%s --fixed-list-mode --fingerprint --with-colons" % (which,)
+ args = [args]
+ p = self._open_subprocess(args)
+
+ # there might be some status thingumy here I should handle... (amk)
+ # ...nope, unless you care about expired sigs or keys (stevegt)
+
+ # Get the response information
+ result = self.result_map['list'](self)
+ self._collect_output(p, result, stdin=p.stdin)
+ lines = result.data.decode(self.encoding,
+ self.decode_errors).splitlines()
+ valid_keywords = 'pub uid sec fpr'.split()
+ for line in lines:
+ if self.verbose:
+ print(line)
+ logger.debug("line: %r", line.rstrip())
+ if not line:
+ break
+ L = line.strip().split(':')
+ if not L:
+ continue
+ keyword = L[0]
+ if keyword in valid_keywords:
+ getattr(result, keyword)(L)
+ return result
+
+ def gen_key(self, input):
+ """Generate a key; you might use gen_key_input() to create the
+ control input.
+
+ >>> gpg = GPG(gnupghome="keys")
+ >>> input = gpg.gen_key_input()
+ >>> result = gpg.gen_key(input)
+ >>> assert result
+ >>> result = gpg.gen_key('foo')
+ >>> assert not result
+
+ """
+ args = ["--gen-key --batch"]
+ result = self.result_map['generate'](self)
+ f = _make_binary_stream(input, self.encoding)
+ self._handle_io(args, f, result, binary=True)
+ f.close()
+ return result
+
+ def gen_key_input(self, **kwargs):
+ """
+ Generate --gen-key input per gpg doc/DETAILS
+ """
+ parms = {}
+ for key, val in list(kwargs.items()):
+ key = key.replace('_','-').title()
+ parms[key] = val
+ parms.setdefault('Key-Type','RSA')
+ parms.setdefault('Key-Length',1024)
+ parms.setdefault('Name-Real', "Autogenerated Key")
+ parms.setdefault('Name-Comment', "Generated by gnupg.py")
+ try:
+ logname = os.environ['LOGNAME']
+ except KeyError:
+ logname = os.environ['USERNAME']
+ hostname = socket.gethostname()
+ parms.setdefault('Name-Email', "%s@%s" % (logname.replace(' ', '_'),
+ hostname))
+ out = "Key-Type: %s\n" % parms.pop('Key-Type')
+ for key, val in list(parms.items()):
+ out += "%s: %s\n" % (key, val)
+ out += "%commit\n"
+ return out
+
+ # Key-Type: RSA
+ # Key-Length: 1024
+ # Name-Real: ISdlink Server on %s
+ # Name-Comment: Created by %s
+ # Name-Email: isdlink@%s
+ # Expire-Date: 0
+ # %commit
+ #
+ #
+ # Key-Type: DSA
+ # Key-Length: 1024
+ # Subkey-Type: ELG-E
+ # Subkey-Length: 1024
+ # Name-Real: Joe Tester
+ # Name-Comment: with stupid passphrase
+ # Name-Email: joe@foo.bar
+ # Expire-Date: 0
+ # Passphrase: abc
+ # %pubring foo.pub
+ # %secring foo.sec
+ # %commit
+
+ #
+ # ENCRYPTION
+ #
+ def encrypt_file(self, file, recipients, sign=None,
+ always_trust=False, passphrase=None,
+ armor=True, output=None, symmetric=False):
+ "Encrypt the message read from the file-like object 'file'"
+ args = ['--encrypt']
+ if symmetric:
+ args = ['--symmetric']
+ else:
+ args = ['--encrypt']
+ if not _is_sequence(recipients):
+ recipients = (recipients,)
+ for recipient in recipients:
+ args.append('--recipient "%s"' % recipient)
+ if armor: # create ascii-armored output - set to False for binary output
+ args.append('--armor')
+ if output: # write the output to a file with the specified name
+ if os.path.exists(output):
+ os.remove(output) # to avoid overwrite confirmation message
+ args.append('--output "%s"' % output)
+ if sign:
+ args.append('--sign --default-key "%s"' % sign)
+ if always_trust:
+ args.append("--always-trust")
+ result = self.result_map['crypt'](self)
+ self._handle_io(args, file, result, passphrase=passphrase, binary=True)
+ logger.debug('encrypt result: %r', result.data)
+ return result
+
+ def encrypt(self, data, recipients, **kwargs):
+ """Encrypt the message contained in the string 'data'
+
+ >>> import shutil
+ >>> if os.path.exists("keys"):
+ ... shutil.rmtree("keys")
+ >>> gpg = GPG(gnupghome="keys")
+ >>> input = gpg.gen_key_input(passphrase='foo')
+ >>> result = gpg.gen_key(input)
+ >>> print1 = result.fingerprint
+ >>> input = gpg.gen_key_input()
+ >>> result = gpg.gen_key(input)
+ >>> print2 = result.fingerprint
+ >>> result = gpg.encrypt("hello",print2)
+ >>> message = str(result)
+ >>> assert message != 'hello'
+ >>> result = gpg.decrypt(message)
+ >>> assert result
+ >>> str(result)
+ 'hello'
+ >>> result = gpg.encrypt("hello again",print1)
+ >>> message = str(result)
+ >>> result = gpg.decrypt(message)
+ >>> result.status == 'need passphrase'
+ True
+ >>> result = gpg.decrypt(message,passphrase='bar')
+ >>> result.status in ('decryption failed', 'bad passphrase')
+ True
+ >>> assert not result
+ >>> result = gpg.decrypt(message,passphrase='foo')
+ >>> result.status == 'decryption ok'
+ True
+ >>> str(result)
+ 'hello again'
+ >>> result = gpg.encrypt("signed hello",print2,sign=print1)
+ >>> result.status == 'need passphrase'
+ True
+ >>> result = gpg.encrypt("signed hello",print2,sign=print1,passphrase='foo')
+ >>> result.status == 'encryption ok'
+ True
+ >>> message = str(result)
+ >>> result = gpg.decrypt(message)
+ >>> result.status == 'decryption ok'
+ True
+ >>> assert result.fingerprint == print1
+
+ """
+ data = _make_binary_stream(data, self.encoding)
+ result = self.encrypt_file(data, recipients, **kwargs)
+ data.close()
+ return result
+
+ def decrypt(self, message, **kwargs):
+ data = _make_binary_stream(message, self.encoding)
+ result = self.decrypt_file(data, **kwargs)
+ data.close()
+ return result
+
+ def decrypt_file(self, file, always_trust=False, passphrase=None,
+ output=None):
+ args = ["--decrypt"]
+ if output: # write the output to a file with the specified name
+ if os.path.exists(output):
+ os.remove(output) # to avoid overwrite confirmation message
+ args.append('--output "%s"' % output)
+ if always_trust:
+ args.append("--always-trust")
+ result = self.result_map['crypt'](self)
+ self._handle_io(args, file, result, passphrase, binary=True)
+ logger.debug('decrypt result: %r', result.data)
+ return result
+
diff --git a/tools/dist/backport.pl b/tools/dist/backport.pl
index 5a062ba..ab5c823 100755
--- a/tools/dist/backport.pl
+++ b/tools/dist/backport.pl
@@ -1,6 +1,7 @@
#!/usr/bin/perl -l
use warnings;
use strict;
+use feature qw/switch say/;
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
@@ -21,14 +22,24 @@ use strict;
use Term::ReadKey qw/ReadMode ReadKey/;
use File::Temp qw/tempfile/;
-
-$/ = ""; # paragraph mode
+use POSIX qw/ctermid/;
my $SVN = $ENV{SVN} || 'svn'; # passed unquoted to sh
my $VIM = 'vim';
my $STATUS = './STATUS';
my $BRANCHES = '^/subversion/branches';
+my $YES = $ENV{YES}; # batch mode: eliminate prompts, add sleeps
+my $WET_RUN = qw[false true][1]; # don't commit
+my $DEBUG = qw[false true][0]; # 'set -x', etc
+
+# derived values
+my $SVNq;
+
+$SVN .= " --non-interactive" if $YES or not defined ctermid;
+$SVNq = "$SVN -q ";
+$SVNq =~ s/-q// if $DEBUG eq 'true';
+
sub usage {
my $basename = $0;
$basename =~ s#.*/##;
@@ -61,23 +72,28 @@ sub merge {
my %entry = @_;
my ($logmsg_fh, $logmsg_filename) = tempfile();
- my $mergeargs;
+ my ($mergeargs, $pattern);
my $backupfile = "backport_pl.$$.tmp";
if ($entry{branch}) {
+ # NOTE: This doesn't escape the branch into the pattern.
+ $pattern = sprintf '\V\(%s branch(es)?\|branches\/%s\|Branch(es)?:\n *%s\)', $entry{branch}, $entry{branch}, $entry{branch};
$mergeargs = "--reintegrate $BRANCHES/$entry{branch}";
- print $logmsg_fh "Reintergrate the $BRANCHES/$entry{branch} branch:";
+ print $logmsg_fh "Reintegrate the $entry{header}:";
print $logmsg_fh "";
- } else {
+ } elsif (@{$entry{revisions}}) {
+ $pattern = '^ [*] \V' . 'r' . $entry{revisions}->[0];
$mergeargs = join " ", (map { "-c$_" } @{$entry{revisions}}), '^/subversion/trunk';
if (@{$entry{revisions}} > 1) {
- print $logmsg_fh "Merge the r$entry{revisions}->[0] group from trunk:";
+ print $logmsg_fh "Merge the $entry{header} from trunk:";
print $logmsg_fh "";
} else {
print $logmsg_fh "Merge r$entry{revisions}->[0] from trunk:";
print $logmsg_fh "";
}
+ } else {
+ die "Don't know how to call $entry{header}";
}
print $logmsg_fh $_ for @{$entry{entry}};
close $logmsg_fh or die "Can't close $logmsg_filename: $!";
@@ -85,21 +101,36 @@ sub merge {
my $script = <<"EOF";
#!/bin/sh
set -e
+if $DEBUG; then
+ set -x
+fi
$SVN diff > $backupfile
-$SVN revert -R .
-$SVN up
-$SVN merge $mergeargs
-$VIM -e -s -n -N -i NONE -u NONE -c '/^ [*] r$entry{revisions}->[0]/normal! dap' -c wq $STATUS
-$SVN commit -F $logmsg_filename
+$SVNq revert -R .
+$SVNq up
+$SVNq merge $mergeargs
+$VIM -e -s -n -N -i NONE -u NONE -c '/$pattern/normal! dap' -c wq $STATUS
+if $WET_RUN; then
+ $SVNq commit -F $logmsg_filename
+else
+ echo "Committing:"
+ $SVN status -q
+ cat $logmsg_filename
+fi
EOF
$script .= <<"EOF" if $entry{branch};
reinteg_rev=\`$SVN info $STATUS | sed -ne 's/Last Changed Rev: //p'\`
-$SVN rm $BRANCHES/$entry{branch}\
- -m "Remove the '$entry{branch}' branch, reintegrated in r\$reinteg_rev."
+if $WET_RUN; then
+ # Sleep to avoid out-of-order commit notifications
+ if [ -n "\$YES" ]; then sleep 15; fi
+ $SVNq rm $BRANCHES/$entry{branch} -m "Remove the '$entry{branch}' branch, reintegrated in r\$reinteg_rev."
+ if [ -n "\$YES" ]; then sleep 1; fi
+else
+ echo "Removing reintegrated '$entry{branch}' branch"
+fi
EOF
- open SHELL, '|-', qw#/bin/sh -x# or die $!;
+ open SHELL, '|-', qw#/bin/sh# or die $!;
print SHELL $script;
close SHELL or warn "$0: sh($?): $!";
@@ -107,6 +138,14 @@ EOF
unlink $logmsg_filename unless $? or $!;
}
# Normalize a branch name taken from a STATUS header: keep only the last
# path component and strip surrounding whitespace.
sub sanitize_branch {
  my ($branch) = @_;
  $branch =~ s#.*/##;     # drop any leading path (e.g. "branches/")
  $branch =~ s/^\s*//;    # trim leading whitespace
  $branch =~ s/\s*$//;    # trim trailing whitespace
  return $branch;
}
+
# TODO: may need to parse other headers too?
sub parse_entry {
my @lines = @_;
@@ -118,33 +157,38 @@ sub parse_entry {
s/^ // for @_;
# revisions
+ $branch = sanitize_branch $1 if $_[0] =~ /^(\S*) branch$/;
while ($_[0] =~ /^r/) {
- while ($_[0] =~ s/^r(\d+)(?:,\s*)?//) {
+ while ($_[0] =~ s/^r(\d+)(?:$|[,; ]+)//) {
push @revisions, $1;
}
shift;
}
# summary
- push @logsummary, shift until $_[0] =~ /^\w+:/;
+ push @logsummary, shift until $_[0] =~ /^\s*\w+:/ or not defined $_[0];
# votes
- unshift @votes, pop until $_[-1] =~ /^Votes:/;
+ unshift @votes, pop until $_[-1] =~ /^\s*Votes:/ or not defined $_[-1];
pop;
# branch
while (@_) {
- shift and next unless $_[0] =~ s/^Branch:\s*//;
- $branch = (shift || shift || die "Branch header found without value");
- $branch =~ s#.*/##;
- $branch =~ s/^\s*//;
- $branch =~ s/\s*$//;
+ shift and next unless $_[0] =~ s/^\s*Branch(es)?:\s*//;
+ $branch = sanitize_branch (shift || shift || die "Branch header found without value");
}
+ # Compute a header.
+ my $header;
+ $header = "r$revisions[0] group" if @revisions;
+ $header = "$branch branch" if $branch;
+ warn "No header for [@lines]" unless $header;
+
return (
revisions => [@revisions],
logsummary => [@logsummary],
branch => $branch,
+ header => $header,
votes => [@votes],
entry => [@lines],
);
@@ -152,40 +196,75 @@ sub parse_entry {
sub handle_entry {
my %entry = parse_entry @_;
+ my @vetoes = grep { /^ -1:/ } @{$entry{votes}};
- print "";
- print "\n>>> The r$entry{revisions}->[0] group:";
- print join ", ", map { "r$_" } @{$entry{revisions}};
- print "$BRANCHES/$entry{branch}" if $entry{branch};
- print "";
- print for @{$entry{logsummary}};
- print "";
- print for @{$entry{votes}};
- print "";
- print "Vetoes found!" if grep { /^ -1:/ } @{$entry{votes}};
-
- # TODO: this changes ./STATUS, which we're reading below, but
+ if ($YES) {
+ merge %entry unless @vetoes;
+ } else {
+ print "";
+ print "\n>>> The $entry{header}:";
+ print join ", ", map { "r$_" } @{$entry{revisions}};
+ print "$BRANCHES/$entry{branch}" if $entry{branch};
+ print "";
+ print for @{$entry{logsummary}};
+ print "";
+ print for @{$entry{votes}};
+ print "";
+ print "Vetoes found!" if @vetoes;
+
+ merge %entry if prompt;
+ }
+
+ # TODO: merge() changes ./STATUS, which we're reading below, but
# on my system the loop in main() doesn't seem to care.
- merge %entry if prompt;
1;
}
sub main {
usage, exit 0 if @ARGV;
- usage, exit 1 unless -r $STATUS;
- @ARGV = $STATUS;
- while (<>) {
- my @lines = split /\n/;
+ open STATUS, "<", $STATUS or (usage, exit 1);
- # Section header?
- print "\n\n=== $lines[0]" and next if $lines[0] =~ /^[A-Z].*:$/i;
+ # Because we use the ':normal' command in Vim...
+ die "A vim with the +ex_extra feature is required"
+ if `${VIM} --version` !~ /[+]ex_extra/;
- # Backport entry?
- handle_entry @lines and next if $lines[0] =~ /^ \*/;
+ # ### TODO: need to run 'revert' here
+ # ### TODO: both here and in merge(), unlink files that previous merges added
+ die "Local mods to STATUS file $STATUS" if `$SVN status -q $STATUS`;
- warn "Unknown entry '$lines[0]' at $ARGV:$.\n";
+ # Skip most of the file
+ while (<STATUS>) {
+ last if /^Approved changes/;
+ }
+ while (<STATUS>) {
+ last unless /^=+$/;
+ }
+ $/ = ""; # paragraph mode
+
+ while (<STATUS>) {
+ my @lines = split /\n/;
+
+ given ($lines[0]) {
+ # Section header
+ when (/^[A-Z].*:$/i) {
+ print "\n\n=== $lines[0]" unless $YES;
+ }
+ # Separator after section header
+ when (/^=+$/i) {
+ break;
+ }
+ # Backport entry?
+ when (/^ \*/) {
+ warn "Too many bullets in $lines[0]" and next
+ if grep /^ \*/, @lines[1..$#lines];
+ handle_entry @lines;
+ }
+ default {
+ warn "Unknown entry '$lines[0]' at $ARGV:$.\n";
+ }
+ }
}
}
diff --git a/tools/dist/collect_sigs.py b/tools/dist/collect_sigs.py
index d7204d6..cdb22bf 100755
--- a/tools/dist/collect_sigs.py
+++ b/tools/dist/collect_sigs.py
@@ -75,10 +75,12 @@ def generate_asc_files(target_dir='.'):
db = sqlite3.connect(os.path.join(target_dir, 'sigs.db'))
curs = db.cursor()
- curs.execute('SELECT filename, signature FROM signatures;')
+ like_filename = 'subversion-%s.%%' % config.version
+ curs.execute('''SELECT filename, signature FROM signatures
+ WHERE filename LIKE ?''', (like_filename, ) )
for filename, signature in curs:
fd = _open(filename)
- fd.write(signature + "\n")
+ fd.write(signature)
for fd in fds.values():
fd.flush()
@@ -181,8 +183,11 @@ def list_signatures():
lines = ""
curs = db.cursor()
+ like_filename = 'subversion-%s.%%' % config.version
curs.execute('''SELECT filename, COUNT(*) FROM signatures
- GROUP BY filename ORDER BY filename''')
+ WHERE filename LIKE ?
+ GROUP BY filename ORDER BY filename''',
+ (like_filename, ) )
for filename, count in curs:
lines += '<a href="%s/%s.asc">%s.asc</a>: %d signature%s<br/>\n' \
% (os.getenv('SCRIPT_NAME'), filename, filename,
diff --git a/tools/dist/dist.sh b/tools/dist/dist.sh
index 1770a85..03d5c39 100755
--- a/tools/dist/dist.sh
+++ b/tools/dist/dist.sh
@@ -224,18 +224,33 @@ echo "Exporting $REPOS_PATH r$REVISION into sandbox..."
rm -f "$DISTPATH/STATUS"
+ver_major=`echo $VERSION | cut -d '.' -f 1`
+ver_minor=`echo $VERSION | cut -d '.' -f 2`
+ver_patch=`echo $VERSION | cut -d '.' -f 3`
+
# Remove contrib/ from our distribution tarball. Some of it is of
# unknown license, and usefulness.
# (See http://svn.haxx.se/dev/archive-2009-04/0166.shtml for discussion.)
-rm -rf "$DISTPATH/contrib"
+if [ "$ver_major" -eq "1" -a "$ver_minor" -ge "7" ]; then
+ rm -rf "$DISTPATH/contrib"
+fi
# Remove notes/ from our distribution tarball. It's large, but largely
# blue-sky and out-of-date, and of questionable use to end users.
-rm -rf "$DISTPATH/notes"
+if [ "$ver_major" -eq "1" -a "$ver_minor" -ge "7" ]; then
+ rm -rf "$DISTPATH/notes"
+fi
# Remove packages/ from the tarball.
# (See http://svn.haxx.se/dev/archive-2009-12/0205.shtml)
-rm -rf "$DISTPATH/packages"
+if [ "$ver_major" -eq "1" -a "$ver_minor" -ge "7" ]; then
+ rm -rf "$DISTPATH/packages"
+fi
+
+# Remove www/ from the tarball for 1.6.x and earlier releases
+if [ "$ver_major" -eq "1" -a "$ver_minor" -le "6" ]; then
+ rm -rf "$DISTPATH/www"
+fi
# Check for a recent enough Python
# Instead of attempting to deal with various line ending issues, just export
@@ -260,30 +275,24 @@ find "$DISTPATH" -name config.nice -print | xargs rm -f
# on end-user's systems, when they should just be compiled by the
# Release Manager and left at that.
-ver_major=`echo $VERSION | cut -d '.' -f 1`
-ver_minor=`echo $VERSION | cut -d '.' -f 2`
-ver_patch=`echo $VERSION | cut -d '.' -f 3`
-
vsn_file="$DISTPATH/subversion/include/svn_version.h"
-
-if [ "$VERSION" != "trunk" ]; then
+if [ "$VERSION" != "trunk" ] && [ "$VERSION" != "nightly" ]; then
sed \
- -e "/#define *SVN_VER_MAJOR/s/[0-9]\+/$ver_major/" \
- -e "/#define *SVN_VER_MINOR/s/[0-9]\+/$ver_minor/" \
- -e "/#define *SVN_VER_PATCH/s/[0-9]\+/$ver_patch/" \
+ -e "/#define *SVN_VER_MAJOR/s/[0-9][0-9]*/$ver_major/" \
+ -e "/#define *SVN_VER_MINOR/s/[0-9][0-9]*/$ver_minor/" \
+ -e "/#define *SVN_VER_PATCH/s/[0-9][0-9]*/$ver_patch/" \
-e "/#define *SVN_VER_TAG/s/\".*\"/\" ($VER_TAG)\"/" \
-e "/#define *SVN_VER_NUMTAG/s/\".*\"/\"$VER_NUMTAG\"/" \
- -e "/#define *SVN_VER_REVISION/s/[0-9]\+/$REVISION/" \
+ -e "/#define *SVN_VER_REVISION/s/[0-9][0-9]*/$REVISION/" \
< "$vsn_file" > "$vsn_file.tmp"
else
# Don't munge the version number if we are creating a nightly trunk tarball
sed \
-e "/#define *SVN_VER_TAG/s/\".*\"/\" ($VER_TAG)\"/" \
-e "/#define *SVN_VER_NUMTAG/s/\".*\"/\"$VER_NUMTAG\"/" \
- -e "/#define *SVN_VER_REVISION/s/[0-9]\+/$REVISION/" \
+ -e "/#define *SVN_VER_REVISION/s/[0-9]\\+/$REVISION/" \
< "$vsn_file" > "$vsn_file.tmp"
fi
-
mv -f "$vsn_file.tmp" "$vsn_file"
echo "Creating svn_version.h.dist, for use in tagging matching tarball..."
@@ -365,6 +374,10 @@ sign_file()
fi
}
+# allow md5sum and sha1sum tool names to be overridden
+[ -n "$MD5SUM" ] || MD5SUM=md5sum
+[ -n "$SHA1SUM" ] || SHA1SUM=sha1sum
+
echo ""
echo "Done:"
if [ -z "$ZIP" ]; then
@@ -372,23 +385,23 @@ if [ -z "$ZIP" ]; then
sign_file $DISTNAME.tar.gz $DISTNAME.tar.bz2
echo ""
echo "md5sums:"
- md5sum "$DISTNAME.tar.bz2" "$DISTNAME.tar.gz"
- type sha1sum > /dev/null 2>&1
+ $MD5SUM "$DISTNAME.tar.bz2" "$DISTNAME.tar.gz"
+ type $SHA1SUM > /dev/null 2>&1
if [ $? -eq 0 ]; then
echo ""
echo "sha1sums:"
- sha1sum "$DISTNAME.tar.bz2" "$DISTNAME.tar.gz"
+ $SHA1SUM "$DISTNAME.tar.bz2" "$DISTNAME.tar.gz"
fi
else
ls -l "$DISTNAME.zip"
sign_file $DISTNAME.zip
echo ""
echo "md5sum:"
- md5sum "$DISTNAME.zip"
- type sha1sum > /dev/null 2>&1
+ $MD5SUM "$DISTNAME.zip"
+ type $SHA1SUM > /dev/null 2>&1
if [ $? -eq 0 ]; then
echo ""
echo "sha1sum:"
- sha1sum "$DISTNAME.zip"
+ $SHA1SUM "$DISTNAME.zip"
fi
fi
diff --git a/tools/dist/getsigs.py b/tools/dist/getsigs.py
deleted file mode 100755
index 17086e1..0000000
--- a/tools/dist/getsigs.py
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/usr/bin/env python
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-# Less terrible, ugly hack of a script than getsigs.pl, but similar. Used to
-# verify the signatures on the release tarballs and produce the list of who
-# signed them in the format we use for the announcements.
-#
-# To use just run it in the directory with the signatures and tarballs and
-# pass the version of subversion you want to check. It assumes gpg is on
-# your path, if it isn't you should fix that. :D
-#
-# Script will die if any gpg process returns an error.
-#
-# Because I hate perl...
-
-import glob, subprocess, shutil, sys, re
-
-key_start = '-----BEGIN PGP SIGNATURE-----\n'
-sig_pattern = re.compile(r'^gpg: Signature made .*? using \w+ key ID (\w+)')
-fp_pattern = re.compile(r'^pub\s+(\w+\/\w+)[^\n]*\n\s+Key\sfingerprint\s=((\s+[0-9A-F]{4}){10})\nuid\s+([^<\(]+)\s')
-
-
-def grab_sig_ids():
- good_sigs = {}
-
- for filename in glob.glob('subversion-*.asc'):
- shutil.copyfile(filename, '%s.bak' % filename)
- text = open(filename).read()
- keys = text.split(key_start)
-
- for key in keys[1:]:
- open(filename, 'w').write(key_start + key)
- gpg = subprocess.Popen(['gpg', '--logger-fd', '1',
- '--verify', filename],
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
-
- rc = gpg.wait()
- output = gpg.stdout.read()
- if rc:
- # gpg choked, die with an error
- print(output)
- sys.stderr.write("BAD SIGNATURE in %s\n" % filename)
- shutil.move('%s.bak' % filename, filename)
- sys.exit(1)
-
- for line in output.split('\n'):
- match = sig_pattern.match(line)
- if match:
- key_id = match.groups()[0]
- good_sigs[key_id] = True
-
- shutil.move('%s.bak' % filename, filename)
-
- return good_sigs
-
-
-def generate_output(good_sigs):
- for id in good_sigs.keys():
- gpg = subprocess.Popen(['gpg', '--fingerprint', id],
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- rc = gpg.wait()
- gpg_output = gpg.stdout.read()
- if rc:
- print(gpg_output)
- sys.stderr.write("UNABLE TO GET FINGERPRINT FOR %s" % id)
- sys.exit(1)
-
- gpg_output = "\n".join([ l for l in gpg_output.splitlines()
- if l[0:7] != 'Warning' ])
-
- fp = fp_pattern.match(gpg_output).groups()
- print(" %s [%s] with fingerprint:" % (fp[3], fp[0]))
- print(" %s" % fp[1])
-
-
-if __name__ == '__main__':
- if len(sys.argv) < 2:
- print("Give me a version number!")
- sys.exit(1)
-
- generate_output(grab_sig_ids())
diff --git a/tools/dist/make-deps-tarball.sh b/tools/dist/make-deps-tarball.sh
new file mode 100755
index 0000000..318adc6
--- /dev/null
+++ b/tools/dist/make-deps-tarball.sh
@@ -0,0 +1,121 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+set -e
+
+APR=apr-1.4.6
+APR_UTIL=apr-util-1.4.1
+NEON=neon-0.29.6
+SERF=serf-0.3.1
+ZLIB=zlib-1.2.7
+SQLITE_VERSION=3071400
+SQLITE=sqlite-amalgamation-$SQLITE_VERSION
+
+HTTPD=httpd-2.2.22
+HTTPD_OOPS=
+APR_ICONV=apr-iconv-1.2.1
+APR_ICONV_OOPS=
+
+WIN32_APR_VIA_HTTPD=1
+
+BASEDIR=`pwd`
+TEMPDIR=$BASEDIR/temp
+
+APACHE_MIRROR=http://archive.apache.org/dist
+
# Build subversion-deps-<ver>.{tar.bz2,tar.gz,zip}: download all
# dependency sources into $TEMPDIR, unpack Unix and win32 flavours into
# canonically-named subdirectories, and archive each set under a
# top-level subversion-$SVN_VERSION/ directory.
create_deps() {
  SVN_VERSION="$1"
  set -x   # trace commands from here on

  # Download everything (wget -nc skips files already present).
  mkdir -p $TEMPDIR
  cd $TEMPDIR
  wget -qnc $APACHE_MIRROR/apr/$APR.tar.bz2
  wget -qnc $APACHE_MIRROR/apr/$APR_UTIL.tar.bz2
  if [ -n "$WIN32_APR_VIA_HTTPD" ]; then
    wget -qnc $APACHE_MIRROR/httpd/$HTTPD-win32-src$HTTPD_OOPS.zip
  else
    wget -qnc $APACHE_MIRROR/apr/$APR-win32-src.zip
    wget -qnc $APACHE_MIRROR/apr/$APR_UTIL-win32-src.zip
    wget -qnc $APACHE_MIRROR/apr/$APR_ICONV-win32-src$APR_ICONV_OOPS.zip
  fi
  wget -qnc http://webdav.org/neon/$NEON.tar.gz
  wget -qnc http://serf.googlecode.com/files/$SERF.tar.bz2
  wget -qnc http://www.zlib.net/$ZLIB.tar.bz2
  wget -qnc http://www.sqlite.org/$SQLITE.zip

  # Unix dependency set: unpack and rename to the versionless directory
  # names the Subversion build expects (neon, zlib, serf, ...).
  mkdir $BASEDIR/unix-dependencies
  cd $BASEDIR/unix-dependencies
  tar zxf $TEMPDIR/$NEON.tar.gz
  tar jxf $TEMPDIR/$ZLIB.tar.bz2
  tar jxf $TEMPDIR/$SERF.tar.bz2
  unzip -q $TEMPDIR/$SQLITE.zip
  mv $NEON neon
  mv $ZLIB zlib
  mv $SERF serf
  mv $SQLITE sqlite-amalgamation
  tar jxf $TEMPDIR/$APR.tar.bz2
  tar jxf $TEMPDIR/$APR_UTIL.tar.bz2
  mv $APR apr
  mv $APR_UTIL apr-util
  cd $TEMPDIR

  # Win32 dependency set: same layout, but apr/apr-util/apr-iconv come
  # either from the httpd source zip or from the individual apr zips.
  mkdir $BASEDIR/win32-dependencies
  cd $BASEDIR/win32-dependencies
  tar zxf $TEMPDIR/$NEON.tar.gz
  tar jxf $TEMPDIR/$ZLIB.tar.bz2
  tar jxf $TEMPDIR/$SERF.tar.bz2
  unzip -q $TEMPDIR/$SQLITE.zip
  mv $NEON neon
  mv $ZLIB zlib
  mv $SERF serf
  mv $SQLITE sqlite-amalgamation
  if [ -n "$WIN32_APR_VIA_HTTPD" ]; then
    unzip -q $TEMPDIR/$HTTPD-win32-src$HTTPD_OOPS.zip
    for i in apr apr-util apr-iconv; do
      mv $HTTPD/srclib/$i .
    done
    rm -rf $HTTPD
  else
    unzip -q $TEMPDIR/$APR-win32-src.zip
    unzip -q $TEMPDIR/$APR_UTIL-win32-src.zip
    unzip -q $TEMPDIR/$APR_ICONV-win32-src$APR_ICONV_OOPS.zip
    mv $APR apr
    mv $APR_UTIL apr-util
    mv $APR_ICONV apr-iconv
  fi

  # Archive each set; both archives use the same subversion-$SVN_VERSION
  # top-level directory name, so the sets are staged one after the other.
  cd $BASEDIR
  mv unix-dependencies subversion-$SVN_VERSION
  tar jcf subversion-deps-$SVN_VERSION.tar.bz2 subversion-$SVN_VERSION
  tar zcf subversion-deps-$SVN_VERSION.tar.gz subversion-$SVN_VERSION
  rm -rf subversion-$SVN_VERSION
  mv win32-dependencies subversion-$SVN_VERSION
  zip -qr subversion-deps-$SVN_VERSION.zip subversion-$SVN_VERSION
  rm -rf subversion-$SVN_VERSION
}
+
+if [ -z "$1" ]; then
+ echo "Please provide a Subversion release number."
+ echo "Example: ./`basename $0` 1.6.19"
+ exit 1
+fi
+
+create_deps "$1"
diff --git a/tools/dist/nightly.sh b/tools/dist/nightly.sh
index b20b641..0f2f991 100755
--- a/tools/dist/nightly.sh
+++ b/tools/dist/nightly.sh
@@ -55,15 +55,15 @@ head=`$svn info $repo/trunk | grep '^Revision' | cut -d ' ' -f 2`
# Get the latest versions of the rolling scripts
for i in release.py dist.sh
do
- $svn export -r $head $repo/trunk/tools/dist/$i@$head $dir/$i
+ $svn export --force -r $head $repo/trunk/tools/dist/$i@$head $dir/$i
done
# We also need ezt
-$svn export -r $head $repo/trunk/build/generator/ezt.py@$head $dir/ezt.py
+$svn export --force -r $head $repo/trunk/build/generator/ezt.py@$head $dir/ezt.py
# Create the environment
cd roll
echo '----------------building environment------------------'
-../release.py --base-dir ${abscwd}/roll build-env
+../release.py --base-dir ${abscwd}/roll build-env trunk-nightly
# Roll the tarballs
echo '-------------------rolling tarball--------------------'
@@ -72,11 +72,11 @@ cd ..
# Create the information page
echo '-------------------moving results---------------------'
-./release.py --base-dir ${abscwd}/roll post-candidates trunk-nightly $head \
- --target $target
+# ./release.py --base-dir ${abscwd}/roll post-candidates trunk-nightly $head \
+# --target $target
if [ ! -d "$target/dist" ]; then mkdir "$target/dist"; fi
if [ -d "$target/dist/r$head" ]; then rm -r "$target/dist/r$head"; fi
-mv $target/deploy $target/dist/r$head
+mv roll/deploy $target/dist/r$head
# Some static links for the most recent artifacts.
ln -sf "r$head" "$target/dist/current"
diff --git a/tools/dist/rat-excludes b/tools/dist/rat-excludes
index 1f817c7..c5db3e4 100644
--- a/tools/dist/rat-excludes
+++ b/tools/dist/rat-excludes
@@ -15,6 +15,7 @@ doc/doxygen.conf
notes/**
packages/
subversion/tests/cmdline/getopt_tests_data/*
+subversion/tests/cmdline/diff_tests_data/*
subversion/bindings/swig/NOTES
subversion/libsvn_fs_base/notes/TODO
subversion/libsvn_fs_base/notes/fs-history
@@ -31,8 +32,11 @@ subversion/bindings/ctypes-python/csvn/ext/__init__.py
subversion/tests/cmdline/svntest/err.py
tools/buildbot/master/public_html/buildbot.css
tools/dist/rat-excludes
+tools/dist/_gnupg.py
+tools/dist/templates/*.ezt
tools/dev/iz/defect.dem
tools/dev/iz/ff2csv.command
+tools/dev/benchmarks/suite1/crontab.entry
tools/hook-scripts/mailer/tests/mailer-t1.output
**/*.dump
**/*.icns
diff --git a/tools/dist/release.py b/tools/dist/release.py
index 7a2acb4..bc80549 100755
--- a/tools/dist/release.py
+++ b/tools/dist/release.py
@@ -21,10 +21,10 @@
# About this script:
-# This script is intended to simplify creating Subversion releases, by
-# automating as much as is possible. It works well with our Apache
-# infrastructure, and should make rolling, posting, and announcing
-# releases dirt simple.
+# This script is intended to simplify creating Subversion releases for
+# any of the supported release lines of Subversion.
+# It works well with our Apache infrastructure, and should make rolling,
+# posting, and announcing releases dirt simple.
#
# This script may be run on a number of platforms, but it is intended to
# be run on people.apache.org. As such, it may have dependencies (such
@@ -34,17 +34,22 @@
# It'd be kind of nice to use the Subversion python bindings in this script,
# but people.apache.org doesn't currently have them installed
+# Futures (Python 2.5 compatibility)
+from __future__ import with_statement
+
# Stuff we need
import os
import re
import sys
import glob
+import fnmatch
import shutil
import urllib2
import hashlib
import tarfile
import logging
import datetime
+import tempfile
import operator
import itertools
import subprocess
@@ -61,15 +66,33 @@ except ImportError:
import ezt
-# Our required / recommended versions
-autoconf_ver = '2.68'
-libtool_ver = '2.4'
-swig_ver = '2.0.4'
+# Our required / recommended release tool versions by release branch
+tool_versions = {
+ 'trunk' : {
+ 'autoconf' : '2.68',
+ 'libtool' : '2.4',
+ 'swig' : '2.0.4',
+ },
+ '1.7' : {
+ 'autoconf' : '2.68',
+ 'libtool' : '2.4',
+ 'swig' : '2.0.4',
+ },
+ '1.6' : {
+ 'autoconf' : '2.64',
+ 'libtool' : '1.5.26',
+ 'swig' : '1.3.36',
+ },
+}
# Some constants
repos = 'http://svn.apache.org/repos/asf/subversion'
-people_host = 'minotaur.apache.org'
-people_dist_dir = '/www/www.apache.org/dist/subversion'
+secure_repos = 'https://svn.apache.org/repos/asf/subversion'
+dist_repos = 'https://dist.apache.org/repos/dist'
+dist_dev_url = dist_repos + '/dev/subversion'
+dist_release_url = dist_repos + '/release/subversion'
+KEYS = 'https://people.apache.org/keys/group/subversion.asc'
+extns = ['zip', 'tar.gz', 'tar.bz2']
#----------------------------------------------------------------------
@@ -79,6 +102,17 @@ class Version(object):
regex = re.compile('(\d+).(\d+).(\d+)(?:-(?:(rc|alpha|beta)(\d+)))?')
def __init__(self, ver_str):
+ # Special case the 'trunk-nightly' version
+ if ver_str == 'trunk-nightly':
+ self.major = None
+ self.minor = None
+ self.patch = None
+ self.pre = 'nightly'
+ self.pre_num = None
+ self.base = 'nightly'
+ self.branch = 'trunk'
+ return
+
match = self.regex.search(ver_str)
if not match:
@@ -96,6 +130,7 @@ class Version(object):
self.pre_num = None
self.base = '%d.%d.%d' % (self.major, self.minor, self.patch)
+ self.branch = '%d.%d' % (self.major, self.minor)
def is_prerelease(self):
return self.pre != None
@@ -122,7 +157,10 @@ class Version(object):
def __str(self):
if self.pre:
- extra = '-%s%d' % (self.pre, self.pre_num)
+ if self.pre == 'nightly':
+ return 'nightly'
+ else:
+ extra = '-%s%d' % (self.pre, self.pre_num)
else:
extra = ''
@@ -175,10 +213,6 @@ def download_file(url, target):
target_file = open(target, 'w')
target_file.write(response.read())
-def assert_people():
- if os.uname()[1] != people_host:
- raise RuntimeError('Not running on expected host "%s"' % people_host)
-
#----------------------------------------------------------------------
# Cleaning up the environment
@@ -241,10 +275,11 @@ class RollDep(object):
class AutoconfDep(RollDep):
- def __init__(self, base_dir, use_existing, verbose):
+ def __init__(self, base_dir, use_existing, verbose, autoconf_ver):
RollDep.__init__(self, base_dir, use_existing, verbose)
self.label = 'autoconf'
self._filebase = 'autoconf-' + autoconf_ver
+ self._autoconf_ver = autoconf_ver
self._url = 'http://ftp.gnu.org/gnu/autoconf/%s.tar.gz' % self._filebase
def have_usable(self):
@@ -252,7 +287,7 @@ class AutoconfDep(RollDep):
if not output: return False
version = output[0].split()[-1:][0]
- return version == autoconf_ver
+ return version == self._autoconf_ver
def use_system(self):
if not self._use_existing: return False
@@ -260,18 +295,18 @@ class AutoconfDep(RollDep):
class LibtoolDep(RollDep):
- def __init__(self, base_dir, use_existing, verbose):
+ def __init__(self, base_dir, use_existing, verbose, libtool_ver):
RollDep.__init__(self, base_dir, use_existing, verbose)
self.label = 'libtool'
self._filebase = 'libtool-' + libtool_ver
+ self._libtool_ver = libtool_ver
self._url = 'http://ftp.gnu.org/gnu/libtool/%s.tar.gz' % self._filebase
def have_usable(self):
output = self._test_version(['libtool', '--version'])
if not output: return False
- version = output[0].split()[-1:][0]
- return version == libtool_ver
+ return self._libtool_ver in output[0]
def use_system(self):
# We unconditionally return False here, to avoid using a borked
@@ -280,10 +315,11 @@ class LibtoolDep(RollDep):
class SwigDep(RollDep):
- def __init__(self, base_dir, use_existing, verbose, sf_mirror):
+ def __init__(self, base_dir, use_existing, verbose, swig_ver, sf_mirror):
RollDep.__init__(self, base_dir, use_existing, verbose)
self.label = 'swig'
self._filebase = 'swig-' + swig_ver
+ self._swig_ver = swig_ver
self._url = 'http://sourceforge.net/projects/swig/files/swig/%(swig)s/%(swig)s.tar.gz/download?use_mirror=%(sf_mirror)s' % \
{ 'swig' : self._filebase,
'sf_mirror' : sf_mirror }
@@ -294,7 +330,7 @@ class SwigDep(RollDep):
if not output: return False
version = output[1].split()[-1:][0]
- return version == swig_ver
+ return version == self._swig_ver
def use_system(self):
if not self._use_existing: return False
@@ -312,9 +348,12 @@ def build_env(args):
if not args.use_existing:
raise
- autoconf = AutoconfDep(args.base_dir, args.use_existing, args.verbose)
- libtool = LibtoolDep(args.base_dir, args.use_existing, args.verbose)
+ autoconf = AutoconfDep(args.base_dir, args.use_existing, args.verbose,
+ tool_versions[args.version.branch]['autoconf'])
+ libtool = LibtoolDep(args.base_dir, args.use_existing, args.verbose,
+ tool_versions[args.version.branch]['libtool'])
swig = SwigDep(args.base_dir, args.use_existing, args.verbose,
+ tool_versions[args.version.branch]['swig'],
args.sf_mirror)
# iterate over our rolling deps, and build them if needed
@@ -328,64 +367,78 @@ def build_env(args):
#----------------------------------------------------------------------
# Create release artifacts
+def compare_changes(repos, branch, revision):
+ mergeinfo_cmd = ['svn', 'mergeinfo', '--show-revs=eligible',
+ repos + '/trunk/CHANGES',
+ repos + '/' + branch + '/' + 'CHANGES']
+ proc = subprocess.Popen(mergeinfo_cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ (stdout, stderr) = proc.communicate()
+ rc = proc.wait()
+ if stderr:
+ raise RuntimeError('svn mergeinfo failed: %s' % stderr)
+ if stdout:
+ # Treat this as a warning since we are now putting entries for future
+ # minor releases in CHANGES on trunk.
+ logging.warning('CHANGES has unmerged revisions: %s' %
+ stdout.replace("\n", " "))
+
def roll_tarballs(args):
'Create the release artifacts.'
- extns = ['zip', 'tar.gz', 'tar.bz2']
if args.branch:
branch = args.branch
else:
- branch = args.version.base[:-1] + 'x'
+ branch = 'branches/%d.%d.x' % (args.version.major, args.version.minor)
logging.info('Rolling release %s from branch %s@%d' % (args.version,
branch, args.revnum))
# Ensure we've got the appropriate rolling dependencies available
- autoconf = AutoconfDep(args.base_dir, False, args.verbose)
- libtool = LibtoolDep(args.base_dir, False, args.verbose)
- swig = SwigDep(args.base_dir, False, args.verbose, None)
+ autoconf = AutoconfDep(args.base_dir, False, args.verbose,
+ tool_versions[args.version.branch]['autoconf'])
+ libtool = LibtoolDep(args.base_dir, False, args.verbose,
+ tool_versions[args.version.branch]['libtool'])
+ swig = SwigDep(args.base_dir, False, args.verbose,
+ tool_versions[args.version.branch]['swig'], None)
for dep in [autoconf, libtool, swig]:
if not dep.have_usable():
raise RuntimeError('Cannot find usable %s' % dep.label)
- # Make sure CHANGES is sync'd
if branch != 'trunk':
- trunk_CHANGES = '%s/trunk/CHANGES@%d' % (repos, args.revnum)
- branch_CHANGES = '%s/branches/%s/CHANGES@%d' % (repos, branch,
- args.revnum)
- proc = subprocess.Popen(['svn', 'diff', '--summarize', branch_CHANGES,
- trunk_CHANGES],
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- (stdout, stderr) = proc.communicate()
- proc.wait()
+ # Make sure CHANGES is sync'd.
+ compare_changes(repos, branch, args.revnum)
- if stdout:
- raise RuntimeError('CHANGES not synced between trunk and branch')
+ # Ensure the output directory doesn't already exist
+ if os.path.exists(get_deploydir(args.base_dir)):
+ raise RuntimeError('output directory \'%s\' already exists'
+ % get_deploydir(args.base_dir))
- # Create the output directory
- if not os.path.exists(get_deploydir(args.base_dir)):
- os.mkdir(get_deploydir(args.base_dir))
+ os.mkdir(get_deploydir(args.base_dir))
# For now, just delegate to dist.sh to create the actual artifacts
extra_args = ''
if args.version.is_prerelease():
- extra_args = '-%s %d' % (args.version.pre, args.version.pre_num)
- logging.info('Building UNIX tarballs')
- run_script(args.verbose, '%s/dist.sh -v %s -pr %s -r %d %s'
- % (sys.path[0], args.version.base, branch, args.revnum,
- extra_args) )
+ if args.version.pre == 'nightly':
+ extra_args = '-nightly'
+ else:
+ extra_args = '-%s %d' % (args.version.pre, args.version.pre_num)
+ # Build Unix last to leave Unix-style svn_version.h for tagging
logging.info('Buildling Windows tarballs')
run_script(args.verbose, '%s/dist.sh -v %s -pr %s -r %d -zip %s'
% (sys.path[0], args.version.base, branch, args.revnum,
extra_args) )
+ logging.info('Building UNIX tarballs')
+ run_script(args.verbose, '%s/dist.sh -v %s -pr %s -r %d %s'
+ % (sys.path[0], args.version.base, branch, args.revnum,
+ extra_args) )
# Move the results to the deploy directory
logging.info('Moving artifacts and calculating checksums')
for e in extns:
if args.version.pre == 'nightly':
- filename = 'subversion-trunk.%s' % e
+ filename = 'subversion-nightly.%s' % e
else:
filename = 'subversion-%s.%s' % (args.version, e)
@@ -395,53 +448,86 @@ def roll_tarballs(args):
m.update(open(filename, 'r').read())
open(filename + '.sha1', 'w').write(m.hexdigest())
- shutil.move('svn_version.h.dist', get_deploydir(args.base_dir))
+ shutil.move('svn_version.h.dist',
+ get_deploydir(args.base_dir) + '/' + 'svn_version.h.dist'
+ + '-' + str(args.version))
# And we're done!
-
#----------------------------------------------------------------------
-# Post the candidate release artifacts
+# Sign the candidate release artifacts
+
+def sign_candidates(args):
+ 'Sign candidate artifacts in the dist development directory.'
+
+ def sign_file(filename):
+ asc_file = open(filename + '.asc', 'a')
+ logging.info("Signing %s" % filename)
+ proc = subprocess.Popen(['gpg', '-ba', '-o', '-', filename],
+ stdout=asc_file)
+ proc.wait()
+ asc_file.close()
-def post_candidates(args):
- 'Post the generated tarballs to web-accessible directory.'
if args.target:
target = args.target
else:
- target = os.path.join(os.getenv('HOME'), 'public_html', 'svn',
- args.version)
+ target = get_deploydir(args.base_dir)
- if args.code_name:
- dirname = args.code_name
- else:
- dirname = 'deploy'
+ for e in extns:
+ filename = os.path.join(target, 'subversion-%s.%s' % (args.version, e))
+ sign_file(filename)
+ if args.version.major >= 1 and args.version.minor <= 6:
+ filename = os.path.join(target,
+ 'subversion-deps-%s.%s' % (args.version, e))
+ sign_file(filename)
- if not os.path.exists(target):
- os.makedirs(target)
- data = { 'version' : args.version,
- 'revnum' : args.revnum,
- 'dirname' : dirname,
- }
+#----------------------------------------------------------------------
+# Post the candidate release artifacts
- # Choose the right template text
- if args.version.is_prerelease():
- if args.version.pre == 'nightly':
- template_filename = 'nightly-candidates.ezt'
- else:
- template_filename = 'rc-candidates.ezt'
+def post_candidates(args):
+ 'Post candidate artifacts to the dist development directory.'
+
+ logging.info('Importing tarballs to %s' % dist_dev_url)
+ svn_cmd = ['svn', 'import', '-m',
+ 'Add %s candidate release artifacts' % args.version.base,
+ '--auto-props', '--config-option',
+ 'config:auto-props:*.asc=svn:eol-style=native;svn:mime-type=text/plain',
+ get_deploydir(args.base_dir), dist_dev_url]
+ if (args.username):
+ svn_cmd += ['--username', args.username]
+ proc = subprocess.Popen(svn_cmd)
+ (stdout, stderr) = proc.communicate()
+ proc.wait()
+
+#----------------------------------------------------------------------
+# Create tag
+
+def create_tag(args):
+ 'Create tag in the repository'
+
+ logging.info('Creating tag for %s' % str(args.version))
+
+ if args.branch:
+ branch = secure_repos + '/' + args.branch
else:
- template_filename = 'stable-candidates.ezt'
+ branch = secure_repos + '/branches/%d.%d.x' % (args.version.major,
+ args.version.minor)
- template = ezt.Template()
- template.parse(get_tmplfile(template_filename).read())
- template.generate(open(os.path.join(target, 'index.html'), 'w'), data)
+ tag = secure_repos + '/tags/' + str(args.version)
- logging.info('Moving tarballs to %s' % os.path.join(target, dirname))
- if os.path.exists(os.path.join(target, dirname)):
- shutil.rmtree(os.path.join(target, dirname))
- shutil.copytree(get_deploydir(args.base_dir), os.path.join(target, dirname))
+ svnmucc_cmd = ['svnmucc', '-m',
+ 'Tagging release ' + str(args.version)]
+ if (args.username):
+ svnmucc_cmd += ['--username', args.username]
+ svnmucc_cmd += ['cp', str(args.revnum), branch, tag]
+ svnmucc_cmd += ['put', os.path.join(get_deploydir(args.base_dir),
+ 'svn_version.h.dist'),
+ tag + '/subversion/include/svn_version.h']
+ # don't redirect stdout/stderr since svnmucc might ask for a password
+ proc = subprocess.Popen(svnmucc_cmd)
+ proc.wait()
#----------------------------------------------------------------------
# Clean dist
@@ -449,30 +535,76 @@ def post_candidates(args):
def clean_dist(args):
'Clean the distribution directory of all but the most recent artifacts.'
- regex = re.compile('subversion-(\d+).(\d+).(\d+)(?:-(?:(rc|alpha|beta)(\d+)))?')
-
- if not args.dist_dir:
- assert_people()
- args.dist_dir = people_dist_dir
+ proc = subprocess.Popen(['svn', 'list', dist_release_url],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ (stdout, stderr) = proc.communicate()
+ proc.wait()
+ if stderr:
+ raise RuntimeError(stderr)
- logging.info('Cleaning dist dir \'%s\'' % args.dist_dir)
+ filenames = stdout.split('\n')
+ tar_gz_archives = []
+ for entry in filenames:
+ if fnmatch.fnmatch(entry, 'subversion-*.tar.gz'):
+ tar_gz_archives.append(entry)
- filenames = glob.glob(os.path.join(args.dist_dir, 'subversion-*.tar.gz'))
versions = []
- for filename in filenames:
- versions.append(Version(filename))
-
+ for archive in tar_gz_archives:
+ versions.append(Version(archive))
+
+ svnmucc_cmd = ['svnmucc', '-m', 'Remove old Subversion releases.\n' +
+ 'They are still available at ' +
+ 'http://archive.apache.org/dist/subversion/']
+ if (args.username):
+ svnmucc_cmd += ['--username', args.username]
for k, g in itertools.groupby(sorted(versions),
lambda x: (x.major, x.minor)):
releases = list(g)
logging.info("Saving release '%s'", releases[-1])
for r in releases[:-1]:
- for filename in glob.glob(os.path.join(args.dist_dir,
- 'subversion-%s.*' % r)):
+ for filename in filenames:
+ if fnmatch.fnmatch(filename, 'subversion-%s.*' % r):
logging.info("Removing '%s'" % filename)
- os.remove(filename)
+ svnmucc_cmd += ['rm', dist_release_url + '/' + filename]
+
+ # don't redirect stdout/stderr since svnmucc might ask for a password
+ proc = subprocess.Popen(svnmucc_cmd)
+ proc.wait()
+
+#----------------------------------------------------------------------
+# Move to dist
+
+def move_to_dist(args):
+ 'Move candidate artifacts to the distribution directory.'
+
+ proc = subprocess.Popen(['svn', 'list', dist_dev_url],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ (stdout, stderr) = proc.communicate()
+ proc.wait()
+ if stderr:
+ raise RuntimeError(stderr)
+
+ filenames = []
+ for entry in stdout.split('\n'):
+ if fnmatch.fnmatch(entry, 'subversion-%s.*' % str(args.version)):
+ filenames.append(entry)
+ svnmucc_cmd = ['svnmucc', '-m',
+ 'Publish Subversion-%s.' % str(args.version)]
+ if (args.username):
+ svnmucc_cmd += ['--username', args.username]
+ svnmucc_cmd += ['rm', dist_dev_url + '/' + 'svn_version.h.dist'
+ + '-' + str(args.version)]
+ for filename in filenames:
+ svnmucc_cmd += ['mv', dist_dev_url + '/' + filename,
+ dist_release_url + '/' + filename]
+ # don't redirect stdout/stderr since svnmucc might ask for a password
+ logging.info('Moving release artifacts to %s' % dist_release_url)
+ proc = subprocess.Popen(svnmucc_cmd)
+ proc.wait()
#----------------------------------------------------------------------
# Write announcements
@@ -481,6 +613,7 @@ def write_news(args):
'Write text for the Subversion website.'
data = { 'date' : datetime.date.today().strftime('%Y%m%d'),
'date_pres' : datetime.date.today().strftime('%Y-%m-%d'),
+ 'major-minor' : '%d.%d' % (args.version.major, args.version.minor),
'version' : str(args.version),
'version_base' : args.version.base,
}
@@ -495,9 +628,15 @@ def write_news(args):
template.generate(sys.stdout, data)
-def get_sha1info(args):
+def get_sha1info(args, replace=False):
'Return a list of sha1 info for the release'
- sha1s = glob.glob(os.path.join(get_deploydir(args.base_dir), '*.sha1'))
+
+ if args.target:
+ target = args.target
+ else:
+ target = get_deploydir(args.base_dir)
+
+ sha1s = glob.glob(os.path.join(target, 'subversion*-%s*.sha1' % args.version))
class info(object):
pass
@@ -505,7 +644,13 @@ def get_sha1info(args):
sha1info = []
for s in sha1s:
i = info()
- i.filename = os.path.basename(s)[:-5]
+ # strip ".sha1"
+ fname = os.path.basename(s)[:-5]
+ if replace:
+ # replace the version number with the [version] reference
+ i.filename = Version.regex.sub('[version]', fname)
+ else:
+ i.filename = fname
i.sha1 = open(s, 'r').read()
sha1info.append(i)
@@ -515,11 +660,13 @@ def get_sha1info(args):
def write_announcement(args):
'Write the release announcement.'
sha1info = get_sha1info(args)
+ siginfo = "\n".join(get_siginfo(args, True)) + "\n"
- data = { 'version' : args.version,
+ data = { 'version' : str(args.version),
'sha1info' : sha1info,
- 'siginfo' : open('getsigs-output', 'r').read(),
- 'major-minor' : args.version.base[:3],
+ 'siginfo' : siginfo,
+ 'major-minor' : '%d.%d' % (args.version.major,
+ args.version.minor),
'major-minor-patch' : args.version.base,
}
@@ -533,6 +680,104 @@ def write_announcement(args):
template.generate(sys.stdout, data)
+def write_downloads(args):
+ 'Output the download section of the website.'
+ sha1info = get_sha1info(args, replace=True)
+
+ data = { 'version' : str(args.version),
+ 'fileinfo' : sha1info,
+ }
+
+ template = ezt.Template(compress_whitespace = False)
+ template.parse(get_tmplfile('download.ezt').read())
+ template.generate(sys.stdout, data)
+
+
+#----------------------------------------------------------------------
+# Validate the signatures for a release
+
+key_start = '-----BEGIN PGP SIGNATURE-----'
+fp_pattern = re.compile(r'^pub\s+(\w+\/\w+)[^\n]*\n\s+Key\sfingerprint\s=((\s+[0-9A-F]{4}){10})\nuid\s+([^<\(]+)\s')
+
+def get_siginfo(args, quiet=False):
+ 'Returns a list of signatures for the release.'
+
+ try:
+ import gnupg
+ except ImportError:
+ import _gnupg as gnupg
+ gpg = gnupg.GPG()
+
+ if args.target:
+ target = args.target
+ else:
+ target = get_deploydir(args.base_dir)
+
+ good_sigs = {}
+ fingerprints = {}
+ output = []
+
+ glob_pattern = os.path.join(target, 'subversion*-%s*.asc' % args.version)
+ for filename in glob.glob(glob_pattern):
+ text = open(filename).read()
+ keys = text.split(key_start)
+
+ if not quiet:
+ logging.info("Checking %d sig(s) in %s" % (len(keys[1:]), filename))
+ for key in keys[1:]:
+ fd, fn = tempfile.mkstemp()
+ os.write(fd, key_start + key)
+ os.close(fd)
+ verified = gpg.verify_file(open(fn, 'rb'), filename[:-4])
+ os.unlink(fn)
+
+ if verified.valid:
+ good_sigs[verified.key_id[-8:]] = True
+ else:
+ sys.stderr.write("BAD SIGNATURE for %s\n" % filename)
+ if verified.key_id:
+ sys.stderr.write(" key id: %s\n" % verified.key_id)
+ sys.exit(1)
+
+ for id in good_sigs.keys():
+ gpg = subprocess.Popen(['gpg', '--fingerprint', id],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ rc = gpg.wait()
+ gpg_output = gpg.stdout.read()
+ if rc:
+ print(gpg_output)
+ sys.stderr.write("UNABLE TO GET FINGERPRINT FOR %s" % id)
+ sys.exit(1)
+
+ gpg_output = "\n".join([ l for l in gpg_output.splitlines()
+ if l[0:7] != 'Warning' ])
+
+ fp = fp_pattern.match(gpg_output).groups()
+ fingerprints["%s [%s] %s" % (fp[3], fp[0], fp[1])] = fp
+
+ for entry in sorted(fingerprints.keys()):
+ fp = fingerprints[entry]
+ output.append(" %s [%s] with fingerprint:" % (fp[3], fp[0]))
+ output.append(" %s" % fp[1])
+
+ return output
+
+def check_sigs(args):
+ 'Check the signatures for the release.'
+
+ output = get_siginfo(args)
+ for line in output:
+ print(line)
+
+def get_keys(args):
+ 'Import the LDAP-based KEYS file to gpg'
+ # We use a tempfile because urlopen() objects don't have a .fileno()
+ with tempfile.SpooledTemporaryFile() as fd:
+ fd.write(urllib2.urlopen(KEYS).read())
+ fd.flush()
+ fd.seek(0)
+ subprocess.check_call(['gpg', '--import'], stdin=fd)
+
#----------------------------------------------------------------------
# Main entry point for argument parsing and handling
@@ -557,6 +802,8 @@ def main():
help='''Download release prerequisistes, including autoconf,
libtool, and swig.''')
subparser.set_defaults(func=build_env)
+ subparser.add_argument('version', type=Version,
+ help='''The release label, such as '1.7.0-alpha1'.''')
subparser.add_argument('--sf-mirror', default='softlayer',
help='''The mirror to use for downloading files from
SourceForge. If in the EU, you may want to use
@@ -576,31 +823,59 @@ def main():
subparser.add_argument('--branch',
help='''The branch to base the release on.''')
+ # Setup the parser for the sign-candidates subcommand
+ subparser = subparsers.add_parser('sign-candidates',
+ help='''Sign the release artifacts.''')
+ subparser.set_defaults(func=sign_candidates)
+ subparser.add_argument('version', type=Version,
+ help='''The release label, such as '1.7.0-alpha1'.''')
+ subparser.add_argument('--target',
+ help='''The full path to the directory containing
+ release artifacts.''')
+
# Setup the parser for the post-candidates subcommand
subparser = subparsers.add_parser('post-candidates',
- help='''Build the website to host the candidate tarballs.
- The default location is somewhere in ~/public_html.
- ''')
+ help='''Commit candidates to the release development area
+ of the dist.apache.org repository.''')
subparser.set_defaults(func=post_candidates)
subparser.add_argument('version', type=Version,
help='''The release label, such as '1.7.0-alpha1'.''')
+ subparser.add_argument('--username',
+ help='''Username for ''' + dist_repos + '''.''')
+
+ # Setup the parser for the create-tag subcommand
+ subparser = subparsers.add_parser('create-tag',
+ help='''Create the release tag.''')
+ subparser.set_defaults(func=create_tag)
+ subparser.add_argument('version', type=Version,
+ help='''The release label, such as '1.7.0-alpha1'.''')
subparser.add_argument('revnum', type=int,
help='''The revision number to base the release on.''')
- subparser.add_argument('--target',
- help='''The full path to the destination.''')
- subparser.add_argument('--code-name',
- help='''A whimsical name for the release, used only for
- naming the download directory.''')
+ subparser.add_argument('--branch',
+ help='''The branch to base the release on.''')
+ subparser.add_argument('--username',
+ help='''Username for ''' + secure_repos + '''.''')
# The clean-dist subcommand
subparser = subparsers.add_parser('clean-dist',
help='''Clean the distribution directory (and mirrors) of
- all but the most recent MAJOR.MINOR release. If no
- dist-dir is given, this command will assume it is
- running on people.apache.org.''')
+ all but the most recent MAJOR.MINOR release.''')
subparser.set_defaults(func=clean_dist)
subparser.add_argument('--dist-dir',
help='''The directory to clean.''')
+ subparser.add_argument('--username',
+ help='''Username for ''' + dist_repos + '''.''')
+
+ # The move-to-dist subcommand
+ subparser = subparsers.add_parser('move-to-dist',
+ help='''Move candiates and signatures from the temporary
+ release dev location to the permanent distribution
+ directory.''')
+ subparser.set_defaults(func=move_to_dist)
+ subparser.add_argument('version', type=Version,
+ help='''The release label, such as '1.7.0-alpha1'.''')
+ subparser.add_argument('--username',
+ help='''Username for ''' + dist_repos + '''.''')
# The write-news subcommand
subparser = subparsers.add_parser('write-news',
@@ -610,12 +885,40 @@ def main():
subparser.add_argument('version', type=Version,
help='''The release label, such as '1.7.0-alpha1'.''')
+ # write-announcement
subparser = subparsers.add_parser('write-announcement',
help='''Output to stdout template text for the emailed
release announcement.''')
subparser.set_defaults(func=write_announcement)
+ subparser.add_argument('--target',
+ help='''The full path to the directory containing
+ release artifacts.''')
+ subparser.add_argument('version', type=Version,
+ help='''The release label, such as '1.7.0-alpha1'.''')
+
+ # write-downloads
+ subparser = subparsers.add_parser('write-downloads',
+ help='''Output to stdout template text for the download
+ table for subversion.apache.org''')
+ subparser.set_defaults(func=write_downloads)
+ subparser.add_argument('version', type=Version,
+ help='''The release label, such as '1.7.0-alpha1'.''')
+
+ # check-sigs
+ subparser = subparsers.add_parser('check-sigs',
+ help='''Output to stdout the signatures collected for this
+ release''')
+ subparser.set_defaults(func=check_sigs)
subparser.add_argument('version', type=Version,
help='''The release label, such as '1.7.0-alpha1'.''')
+ subparser.add_argument('--target',
+ help='''The full path to the directory containing
+ release artifacts.''')
+
+ # get-keys
+ subparser = subparsers.add_parser('get-keys',
+ help='''Import committers' public keys to ~/.gpg/''')
+ subparser.set_defaults(func=get_keys)
# A meta-target
subparser = subparsers.add_parser('clean',
diff --git a/tools/dist/templates/download.ezt b/tools/dist/templates/download.ezt
new file mode 100644
index 0000000..601818d
--- /dev/null
+++ b/tools/dist/templates/download.ezt
@@ -0,0 +1,13 @@
+<p style="font-size: 150%; text-align: center;">Subversion [version]</p>
+<table class="centered">
+<tr>
+ <th>File</th>
+ <th>Checksum (SHA1)</th>
+ <th>Signatures</th>
+</tr>
+[for fileinfo]<tr>
+ <td><a href="[[]preferred]subversion/[fileinfo.filename]">[fileinfo.filename]</a></td>
+ <td class="checksum">[fileinfo.sha1]</td>
+ <td>[<a href="http://www.apache.org/dist/subversion/[fileinfo.filename].asc">PGP</a>]</td>
+</tr>[end]
+</table>
diff --git a/tools/dist/templates/nightly-candidates.ezt b/tools/dist/templates/nightly-candidates.ezt
index c2c6fcd..6e18a1a 100644
--- a/tools/dist/templates/nightly-candidates.ezt
+++ b/tools/dist/templates/nightly-candidates.ezt
@@ -59,7 +59,4 @@ made available to users who rely on their operating system distro's
packages.</p>
<p>If you want to help us test this distribution of Subversion, you
-can find the files <a href="[dirname]/">here</a>.</p>
-
-</body>
-</html>
+can find the files below.</p>
diff --git a/tools/dist/templates/rc-candidates.ezt b/tools/dist/templates/rc-candidates.ezt
deleted file mode 100644
index 0f46de8..0000000
--- a/tools/dist/templates/rc-candidates.ezt
+++ /dev/null
@@ -1,63 +0,0 @@
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied. See the License for the
- specific language governing permissions and limitations
- under the License.
-
--->
-
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
-"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<html>
-<head>
-<title>Apache Subversion [version] tarballs</title>
-</head>
-<body style="font-size: 14pt; text-align: justify;
- background-color: #f0f0f0; padding: 0 5%">
-<h1 style="font-size: 30pt; text-align: center;
- text-decoration: underline">WARNING</h1>
-
-<p>The code you are about to download is a <i>Release Candidate</i>
-for Apache Subversion [version] (r[revnum]).</p>
-
-<p>These are candidate tarballs for a pre-release version of Subversion. As
-such, they are likely to contain bugs, some known, others unknown. You
-are welcome and encouraged to test this release, and
-<a href="http://subversion.apache.org/docs/community-guide/issues.html">report
-bugs</a> back to the developers, but please keep in mind that this is
-not a final release of Apache Subversion.</p>
-
-<p>If you are looking for a copy of Subversion for production use, this
-is <i>not it</i>; you should instead grab the latest stable release
-from the <a
-href="http://subversion.apache.org/download/">Download area</a>.</p>
-
-<h2 style="font-size: 18pt">Note to operating system distro package
-maintainers</h2>
-
-<p>As stated above, this is <i>not</i> an official, end-user release
-of Subversion. It is a distribution intended for testing only, and has not
-been publicly announced. When it has been announced, it still won't be
-suitable for production use. If you chose to package this pre-release for
-your operating system distro's management system, you must do so in a way which
-clearly denotes that this is not the final release, and is only for testing
-purposes. And please don't do so until it has been publicly announced.</p>
-
-<p>If you want to help us test this distribution of Subversion, you
-can find the files <a href="[dirname]/">here</a>.</p>
-
-</body>
-</html>
diff --git a/tools/dist/templates/rc-news.ezt b/tools/dist/templates/rc-news.ezt
index b0468e4..959735c 100644
--- a/tools/dist/templates/rc-news.ezt
+++ b/tools/dist/templates/rc-news.ezt
@@ -4,16 +4,16 @@
title="Link to this section">&para;</a>
</h3>
-<p>We are please to announce to release of Apache Subversion [version]. This
+<p>We are pleased to announce the release of Apache Subversion [version]. This
release is not intended for production use, but is provided as a milestone
to encourage wider testing and feedback from intrepid users and maintainers.
Please see the
<a href="">release
announcement</a> for more information about this release, and the
- <a href="/docs/release-notes/[version_base].html">release notes</a> and
+ <a href="/docs/release-notes/[major-minor].html">release notes</a> and
<a href="http://svn.apache.org/repos/asf/subversion/tags/[version]/CHANGES">
change log</a> for information about what will eventually be
- in the [version_base].0 release.</p>
+ in the [version_base] release.</p>
<p>To get this release from the nearest mirror, please visit our
<a href="/download/#pre-releases">download page</a>.</p>
diff --git a/tools/dist/templates/stable-candidates.ezt b/tools/dist/templates/stable-candidates.ezt
deleted file mode 100644
index 3320bbb..0000000
--- a/tools/dist/templates/stable-candidates.ezt
+++ /dev/null
@@ -1,97 +0,0 @@
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied. See the License for the
- specific language governing permissions and limitations
- under the License.
-
--->
-
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
-"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<html>
-<head>
-<title>Apache Subversion [version] tarballs</title>
-</head>
-<body style="font-size: 14pt; text-align: justify;
- background-color: #f0f0f0; padding: 0 5%">
-<h1 style="font-size: 30pt; text-align: center;
- text-decoration: underline">WARNING</h1>
-
-<p>The code you are about to download is a <i>Release Candidate</i>
-for Apache Subversion [version] (r[revnum]).</p>
-
-
-<p>A <i>Release Candidate</i> is exactly what it sounds like: a
-distribution of Subversion that may become an official release later,
-<i>if and only if</i> it passes preliminary testing by those members
-of the community who are interested in testing it.</p>
-
-<!-- , which means it is considered <strong
- style="text-decoration: underline">UNRELEASED</strong> code. The
- term 'release candidate' means the code works to the best knowledge
- of the Subversion developers, but that it still requires testing by a
- larger number of people to root out bugs.</p> -->
-
-<p>As such, if you are interested in helping us test this Release
-Candidate, you're very welcome to download and test these packages.
-If you are looking for a copy of Subversion for production use, this
-is <i>not it</i>; you should instead grab the latest stable release
-from the <a
-href="http://subversion.tigris.org/project_packages.html">Download
-area</a>.</p>
-
-<h2 style="font-size: 18pt">Note to operating system distro package
-maintainers</h2>
-
-<p>As stated above, this is <i>not</i> an official, end-user release
-of Subversion. It is a distribution intended for testing only. Please
-do <i>not</i> package this distribution in any way. It should not be
-made available to users who rely on their operating system distro's
-packages.</p>
-
-<h2 style="font-size: 14pt">Why shouldn't I set up/make available a
-Release Candidate for production use?</h2>
-
-<p style="font-size: 11pt">(Taken from a <a
- href="http://svn.haxx.se/dev/archive-2005-11/1295.shtml"
->mail by Karl Fogel</a> on the subject)</p>
-
-<p style="font-size: 11pt">Subversion release candidates are for
-testing only. We might have to withdraw one to fix bugs, and fixing
-those bugs might involve changing APIs, or changing a soft-upgrade
-strategy in the repository or working copy formats. If some production
-users had begun depending on the new API, or had unknowingly
-soft-upgraded their repository or working copy, then they'd be in for
-a very unpleasant suprise when the real release comes out and doesn't
-have the same API anymore, or doesn't use the same formats. Not only
-would Subversion suddenly "stop working" for them, but there wouldn't
-be any convenient path to get it working again, since no blessed
-Subversion release would have the code needed to interpret their
-legacy data.</p>
-
-<p style="font-size: 11pt">We encourage RC testing by users who know
-how to install from a tarball independently of their OS's packaging
-system. Users who install only packaged releases, however, should wait
-for and use only officially released Subversions. Anything else is
-playing with fire. When the inevitable blowup happens, both your
-reputation as a packager and Subversion's reputation will suffer --
-but only one will deserve it.</p>
-
-<p>If you want to help us test this distribution of Subversion, you
-can find the files <a href="[dirname]/">here</a>.</p>
-
-</body>
-</html>
diff --git a/tools/dist/templates/stable-news.ezt b/tools/dist/templates/stable-news.ezt
new file mode 100644
index 0000000..aee573f
--- /dev/null
+++ b/tools/dist/templates/stable-news.ezt
@@ -0,0 +1,19 @@
+<div class="h3" id="news-[date]">
+<h3>[date_pres] &mdash; Apache Subversion [version] Released
+ <a class="sectionlink" href="#news-[date]"
+ title="Link to this section">&para;</a>
+</h3>
+
+<p>We are pleased to announce the release of Apache Subversion [version].
+ This is the most complete Subversion release to date, and we encourage
+ users of Subversion to upgrade as soon as reasonable. Please see the
+ <a href=""
+ >release announcement</a> and the
+ <a href="http://svn.apache.org/repos/asf/subversion/tags/[version]/CHANGES"
+ >change log</a> for more information about this release.</p>
+
+<p>To get this release from the nearest mirror, please visit our
+ <a href="/download/#recommended-release">download page</a>.</p>
+
+</div> <!-- #news-[date] -->
+
diff --git a/tools/examples/SvnCLBrowse b/tools/examples/SvnCLBrowse
index 43e16ef..fc4c765 100755
--- a/tools/examples/SvnCLBrowse
+++ b/tools/examples/SvnCLBrowse
@@ -161,7 +161,7 @@ class _item:
class SvnCLBrowse(wx.App):
def __init__(self, wc_dir):
svn.core.svn_config_ensure(None)
- self.svn_ctx = svn.client.ctx_t()
+ self.svn_ctx = svn.client.svn_client_create_context()
self.svn_ctx.config = svn.core.svn_config_get_config(None)
if wc_dir is not None:
self.wc_dir = svn.core.svn_path_canonicalize(wc_dir)
diff --git a/tools/examples/blame.py b/tools/examples/blame.py
index 87d33b3..14368e5 100755
--- a/tools/examples/blame.py
+++ b/tools/examples/blame.py
@@ -91,9 +91,9 @@ def blame(path, filename, rev=None):
# print ''.join(diffresult)
# print annotresult
for x in range(len(annotresult.keys())):
- sys.stdout.write("Line %d (rev %d):%s" % (x,
- annotresult[x][0],
- annotresult[x][1]))
+ sys.stdout.write("Line %d (r%d):%s" % (x,
+ annotresult[x][0],
+ annotresult[x][1]))
def usage():
print("USAGE: blame.py [-r REV] repos-path file")
diff --git a/tools/examples/get-location-segments.py b/tools/examples/get-location-segments.py
index c084dae..c8b3639 100755
--- a/tools/examples/get-location-segments.py
+++ b/tools/examples/get-location-segments.py
@@ -21,6 +21,7 @@
#
import sys
import os
+import getpass
from svn import client, ra, core
def printer(segment, pool):
@@ -71,6 +72,42 @@ def parse_args(args):
return url, peg_revision, start_revision, end_revision
+def prompt_func_ssl_unknown_cert(realm, failures, cert_info, may_save, pool):
+ print "The certficate details are as follows:"
+ print "--------------------------------------"
+ print "Issuer : " + str(cert_info.issuer_dname)
+ print "Hostname : " + str(cert_info.hostname)
+ print "ValidFrom : " + str(cert_info.valid_from)
+ print "ValidUpto : " + str(cert_info.valid_until)
+ print "Fingerprint: " + str(cert_info.fingerprint)
+ print ""
+ ssl_trust = core.svn_auth_cred_ssl_server_trust_t()
+ if may_save:
+ choice = raw_input( "accept (t)temporarily (p)permanently: ")
+ else:
+ choice = raw_input( "(r)Reject or accept (t)temporarily: ")
+ if choice[0] == "t" or choice[0] == "T":
+ ssl_trust.may_save = False
+ ssl_trust.accepted_failures = failures
+ elif choice[0] == "p" or choice[0] == "P":
+ ssl_trust.may_save = True
+ ssl_trust.accepted_failures = failures
+ else:
+ ssl_trust = None
+ return ssl_trust
+
+def prompt_func_simple_prompt(realm, username, may_save, pool):
+ username = raw_input("username: ")
+ password = getpass.getpass(prompt="password: ")
+ simple_cred = core.svn_auth_cred_simple_t()
+ simple_cred.username = username
+ simple_cred.password = password
+ simple_cred.may_save = False
+ return simple_cred
+
+def prompt_func_gnome_keyring_prompt(keyring, pool):
+ return getpass.getpass(prompt="Password for '%s' GNOME keyring: " % keyring)
+
def main():
try:
url, peg_revision, start_revision, end_revision = parse_args(sys.argv[1:])
@@ -90,15 +127,17 @@ ERROR: %s
sys.exit(1)
core.svn_config_ensure(None)
- ctx = client.ctx_t()
+ ctx = client.svn_client_create_context()
+ ctx.config = core.svn_config_get_config(None)
# Make sure that these are at the start of the list, so passwords from
# gnome-keyring / kwallet are checked before asking for new passwords.
- # Note that we don't pass our config here, since we can't seem to access
- # ctx.config.config (ctx.config is opaque).
- providers = core.svn_auth_get_platform_specific_client_providers(None, None)
+ providers = core.svn_auth_get_platform_specific_client_providers(ctx.config['config'], None)
providers.extend([
client.get_simple_provider(),
+ core.svn_auth_get_ssl_server_trust_file_provider(),
+ core.svn_auth_get_simple_prompt_provider(prompt_func_simple_prompt, 2),
+ core.svn_auth_get_ssl_server_trust_prompt_provider(prompt_func_ssl_unknown_cert),
client.get_username_provider(),
client.get_ssl_server_trust_file_provider(),
client.get_ssl_client_cert_file_provider(),
@@ -106,7 +145,9 @@ ERROR: %s
])
ctx.auth_baton = core.svn_auth_open(providers)
- ctx.config = core.svn_config_get_config(None)
+
+ if hasattr(core, 'svn_auth_set_gnome_keyring_unlock_prompt_func'):
+ core.svn_auth_set_gnome_keyring_unlock_prompt_func(ctx.auth_baton, prompt_func_gnome_keyring_prompt)
ra_callbacks = ra.callbacks_t()
ra_callbacks.auth_baton = ctx.auth_baton
diff --git a/tools/examples/info.rb b/tools/examples/info.rb
index fad6837..e1097e3 100644
--- a/tools/examples/info.rb
+++ b/tools/examples/info.rb
@@ -26,6 +26,7 @@
#
require "svn/core"
+require "svn/ext/core"
require "svn/client"
require "svn/wc"
require "svn/repos"
@@ -45,6 +46,12 @@ simple_prompt = Proc.new do
result.password = STDIN.gets.strip
end
+gnome_keyring_prompt = Proc.new do
+ |keyring_name|
+
+ print "Password for '#{keyring_name}' GNOME keyring: "
+ STDIN.gets.strip
+end
if ARGV.length != 1
puts "Usage: info.rb URL[@REV]"
@@ -58,6 +65,12 @@ else
ctx.add_ssl_client_cert_file_provider
ctx.add_ssl_client_cert_pw_file_provider
+ # Allow asking for the gnome keyring password, in case the keyring is
+ # locked.
+ if Svn::Ext::Core.respond_to?(:svn_auth_set_gnome_keyring_unlock_prompt_func)
+ Svn::Ext::Core::svn_auth_set_gnome_keyring_unlock_prompt_func(ctx.auth_baton, gnome_keyring_prompt)
+ end
+
repos_uri, revision = ARGV[0].split("@", 2)
if revision
revision = Integer(revision)
diff --git a/tools/examples/svnshell.rb b/tools/examples/svnshell.rb
index 3b43853..a49000e 100755
--- a/tools/examples/svnshell.rb
+++ b/tools/examples/svnshell.rb
@@ -125,7 +125,7 @@ class SvnShell
puts("Invalid argument for #{cmd}: #{args.join(' ')}")
end
else
- puts("Unknown command: #{cmd}")
+ puts("Unknown subcommand: #{cmd}")
puts("Try one of these commands: ", WORDS.sort.join(" "))
end
end
diff --git a/tools/examples/walk-config-auth.py b/tools/examples/walk-config-auth.py
new file mode 100755
index 0000000..b3298f3
--- /dev/null
+++ b/tools/examples/walk-config-auth.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+
+import sys
+import os
+import svn.core
+import svn.client
+
+if '--help' in sys.argv:
+ sys.stdout.write("""\
+Usage: %s [CONFIG_DIR]
+
+Crawl the authentication credentials cache under CONFIG_DIR (or the
+default user Subversion runtime configuration directory if not
+provided), displaying what is found and prompting the user regarding
+whether Subversion should or should not delete each cached set of
+credentials found.
+
+""" % (sys.argv[0]))
+ sys.exit(0)
+
+config_dir = svn.core.svn_config_get_user_config_path(None, '')
+if len(sys.argv) > 1:
+ config_dir = sys.argv[1]
+
+svn.core.svn_config_ensure(config_dir)
+
+def print_help():
+ sys.stdout.write("""\
+ Valid actions are as follows:
+ (v) view details of the credentials
+ (d) delete the credentials
+ (n) continue to next credentials
+ (q) quit the program
+ (?) show this help output
+
+""")
+
+def show_creds(hash):
+ hash_keys = hash.keys()
+ maxkeylen = max(map(lambda x: len(x), hash_keys))
+ maxvallen = max(map(lambda x: len(x), hash.values()))
+ hash_keys.sort()
+ sys.stdout.write("+")
+ sys.stdout.write("-" * (maxkeylen + 2))
+ sys.stdout.write("+")
+ sys.stdout.write("-" * (78 - maxkeylen - 2))
+ sys.stdout.write("\n")
+ for key in hash_keys:
+ sys.stdout.write("| %s | %s\n" % (key.ljust(maxkeylen), hash[key]))
+ sys.stdout.write("+")
+ sys.stdout.write("-" * (maxkeylen + 2))
+ sys.stdout.write("+")
+ sys.stdout.write("-" * (78 - maxkeylen - 2))
+ sys.stdout.write("\n")
+
+def walk_func(cred_kind, realmstring, hash, pool):
+ show_creds({ 'cred_kind' : cred_kind,
+ 'realmstring' : realmstring })
+ while 1:
+ yesno = raw_input(" Action (v/d/n/q/?) [n]? ")
+ if yesno == '?':
+ print_help()
+ elif yesno == 'v':
+ show_creds(hash)
+ elif yesno == 'n':
+ return 0
+ elif yesno == 'd':
+ return 1
+ elif yesno == 'q':
+ raise svn.core.SubversionException("", svn.core.SVN_ERR_CEASE_INVOCATION)
+ elif yesno == '':
+ return 0
+ else:
+ sys.stderr.write("ERROR: Invalid input")
+
+svn.core.svn_config_walk_auth_data(config_dir, walk_func)
diff --git a/tools/hook-scripts/commit-access-control.pl.in b/tools/hook-scripts/commit-access-control.pl.in
index 0e5fade..e2a968b 100755
--- a/tools/hook-scripts/commit-access-control.pl.in
+++ b/tools/hook-scripts/commit-access-control.pl.in
@@ -6,7 +6,7 @@
# commit in repository REPOS using the permissions listed in the
# configuration file CONF_FILE.
#
-# $HeadURL: http://svn.apache.org/repos/asf/subversion/branches/1.7.x/tools/hook-scripts/commit-access-control.pl.in $
+# $HeadURL: http://svn.apache.org/repos/asf/subversion/branches/1.8.x/tools/hook-scripts/commit-access-control.pl.in $
# $LastChangedDate: 2009-11-16 19:07:17 +0000 (Mon, 16 Nov 2009) $
# $LastChangedBy: hwright $
# $LastChangedRevision: 880911 $
diff --git a/tools/hook-scripts/control-chars.py b/tools/hook-scripts/control-chars.py
new file mode 100755
index 0000000..17223fe
--- /dev/null
+++ b/tools/hook-scripts/control-chars.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+'''control-chars.py: Subversion repository hook script that rejects filenames
+which contain control characters. Expects to be called like a pre-commit hook:
+ control-chars.py <REPOS-PATH> <TXN-NAME>
+
+Latest version should be available at
+http://svn.apache.org/repos/asf/subversion/trunk/tools/hook-scripts/
+
+See validate-files.py for more generic validations.'''
+
+import sys
+import re
+import posixpath
+
+import svn
+import svn.fs
+import svn.repos
+import svn.core
+
+# Can't hurt to disallow chr(0), though the C API will never pass one anyway.
+control_chars = set( [chr(i) for i in range(32)] )
+control_chars.add(chr(127))
+
+def check_node(node, path):
+ "check NODE for control characters. PATH is used for error messages"
+ if node.action == 'A':
+ if any((c in control_chars) for c in node.name):
+ sys.stderr.write("'%s' contains a control character" % path)
+ return 3
+
+def walk_tree(node, path, callback):
+ "Walk NODE"
+ if not node:
+ return 0
+
+ ret_val = callback(node, path)
+ if ret_val > 0:
+ return ret_val
+
+ node = node.child
+ if not node:
+ return 0
+
+ while node:
+ full_path = posixpath.join(path, node.name)
+ ret_val = walk_tree(node, full_path, callback)
+ # If we ran into an error just return up the stack all the way
+ if ret_val > 0:
+ return ret_val
+ node = node.sibling
+
+ return 0
+
+def usage():
+ sys.stderr.write("Invalid arguments, expects to be called like a pre-commit hook.")
+
+def main(ignored_pool, argv):
+ if len(argv) < 3:
+ usage()
+ return 2
+
+ repos_path = svn.core.svn_path_canonicalize(argv[1])
+ txn_name = argv[2]
+
+ if not repos_path or not txn_name:
+ usage()
+ return 2
+
+ repos = svn.repos.svn_repos_open(repos_path)
+ fs = svn.repos.svn_repos_fs(repos)
+ txn = svn.fs.svn_fs_open_txn(fs, txn_name)
+ txn_root = svn.fs.svn_fs_txn_root(txn)
+ base_rev = svn.fs.svn_fs_txn_base_revision(txn)
+ if base_rev is None or base_rev <= svn.core.SVN_INVALID_REVNUM:
+ sys.stderr.write("Transaction '%s' is not based on a revision" % txn_name)
+ return 2
+ base_root = svn.fs.svn_fs_revision_root(fs, base_rev)
+ editor, editor_baton = svn.repos.svn_repos_node_editor(repos, base_root,
+ txn_root)
+ try:
+ svn.repos.svn_repos_replay2(txn_root, "", svn.core.SVN_INVALID_REVNUM,
+ False, editor, editor_baton, None, None)
+ except svn.core.SubversionException as e:
+ # If we get a file not found error then some file has a newline in it and
+ # fsfs's own transaction is now corrupted.
+ if e.apr_err == svn.core.SVN_ERR_FS_NOT_FOUND:
+ match = re.search("path '(.*?)'", e.message)
+ if not match:
+ sys.stderr.write(repr(e))
+ return 2
+ path = match.group(1)
+ sys.stderr.write("Path name that contains '%s' has a newline." % path)
+ return 3
+ # fs corrupt error probably means that there is probably both
+ # file and file\n in the transaction. However, we can't really determine
+ # which files since the transaction is broken. Even if we didn't reject
+ # this it would not be able to be committed. This just gives a better
+ # error message.
+ elif e.apr_err == svn.core.SVN_ERR_FS_CORRUPT:
+ sys.stderr.write("Some path contains a newline causing: %s" % repr(e))
+ return 3
+ else:
+ sys.stderr.write(repr(e))
+ return 2
+ tree = svn.repos.svn_repos_node_from_baton(editor_baton)
+ return walk_tree(tree, "/", check_node)
+
+if __name__ == '__main__':
+ sys.exit(svn.core.run_app(main, sys.argv))
diff --git a/tools/hook-scripts/mailer/mailer.conf.example b/tools/hook-scripts/mailer/mailer.conf.example
index be04877..3ad28de 100644
--- a/tools/hook-scripts/mailer/mailer.conf.example
+++ b/tools/hook-scripts/mailer/mailer.conf.example
@@ -1,7 +1,7 @@
#
# mailer.conf: example configuration file for mailer.py
#
-# $Id: mailer.conf.example 885511 2009-11-30 17:30:17Z julianfoad $
+# $Id: mailer.conf.example 1439592 2013-01-28 19:20:53Z danielsh $
[general]
@@ -146,6 +146,15 @@
#
# from_addr = %(author)s@example.com
#
+# The substitution variable "repos_basename" is provided, and is set to
+# the directory name of the repository. This can be useful to set
+# a custom subject that can be re-used in multiple repositories:
+#
+# commit_subject_prefix = [svn-%(repos_basename)s]
+#
+# For example if the repository is at /path/to/repo/project-x then
+# the subject of commit emails will be prefixed with [svn-project-x]
+#
#
# SUMMARY
#
diff --git a/tools/hook-scripts/mailer/mailer.py b/tools/hook-scripts/mailer/mailer.py
index 08079fe..65146f1 100755
--- a/tools/hook-scripts/mailer/mailer.py
+++ b/tools/hook-scripts/mailer/mailer.py
@@ -22,10 +22,10 @@
#
# mailer.py: send email describing a commit
#
-# $HeadURL: http://svn.apache.org/repos/asf/subversion/branches/1.7.x/tools/hook-scripts/mailer/mailer.py $
-# $LastChangedDate: 2010-12-30 20:46:50 +0000 (Thu, 30 Dec 2010) $
-# $LastChangedBy: hwright $
-# $LastChangedRevision: 1053998 $
+# $HeadURL: http://svn.apache.org/repos/asf/subversion/branches/1.8.x/tools/hook-scripts/mailer/mailer.py $
+# $LastChangedDate: 2013-04-12 07:44:37 +0000 (Fri, 12 Apr 2013) $
+# $LastChangedBy: rhuijben $
+# $LastChangedRevision: 1467191 $
#
# USAGE: mailer.py commit REPOS REVISION [CONFIG-FILE]
# mailer.py propchange REPOS REVISION AUTHOR REVPROPNAME [CONFIG-FILE]
@@ -98,7 +98,10 @@ def main(pool, cmd, config_fname, repos_dir, cmd_args):
if cmd == 'commit':
revision = int(cmd_args[0])
repos = Repository(repos_dir, revision, pool)
- cfg = Config(config_fname, repos, { 'author' : repos.author })
+ cfg = Config(config_fname, repos,
+ {'author': repos.author,
+ 'repos_basename': os.path.basename(repos.repos_dir)
+ })
messenger = Commit(pool, cfg, repos)
elif cmd == 'propchange' or cmd == 'propchange2':
revision = int(cmd_args[0])
@@ -108,14 +111,20 @@ def main(pool, cmd, config_fname, repos_dir, cmd_args):
repos = Repository(repos_dir, revision, pool)
# Override the repos revision author with the author of the propchange
repos.author = author
- cfg = Config(config_fname, repos, { 'author' : author })
+ cfg = Config(config_fname, repos,
+ {'author': author,
+ 'repos_basename': os.path.basename(repos.repos_dir)
+ })
messenger = PropChange(pool, cfg, repos, author, propname, action)
elif cmd == 'lock' or cmd == 'unlock':
author = cmd_args[0]
repos = Repository(repos_dir, 0, pool) ### any old revision will do
# Override the repos revision author with the author of the lock/unlock
repos.author = author
- cfg = Config(config_fname, repos, { 'author' : author })
+ cfg = Config(config_fname, repos,
+ {'author': author,
+ 'repos_basename': os.path.basename(repos.repos_dir)
+ })
messenger = Lock(pool, cfg, repos, author, cmd == 'lock')
else:
raise UnknownSubcommand(cmd)
@@ -228,6 +237,7 @@ class MailedOutput(OutputBase):
self.reply_to = self.reply_to[3:]
def mail_headers(self, group, params):
+ from email import Utils
subject = self.make_subject(group, params)
try:
subject.encode('ascii')
@@ -237,6 +247,8 @@ class MailedOutput(OutputBase):
hdrs = 'From: %s\n' \
'To: %s\n' \
'Subject: %s\n' \
+ 'Date: %s\n' \
+ 'Message-ID: %s\n' \
'MIME-Version: 1.0\n' \
'Content-Type: text/plain; charset=UTF-8\n' \
'Content-Transfer-Encoding: 8bit\n' \
@@ -245,7 +257,8 @@ class MailedOutput(OutputBase):
'X-Svn-Commit-Revision: %d\n' \
'X-Svn-Commit-Repository: %s\n' \
% (self.from_addr, ', '.join(self.to_addrs), subject,
- group, self.repos.author or 'no_author', self.repos.rev,
+ Utils.formatdate(), Utils.make_msgid(), group,
+ self.repos.author or 'no_author', self.repos.rev,
os.path.basename(self.repos.repos_dir))
if self.reply_to:
hdrs = '%sReply-To: %s\n' % (hdrs, self.reply_to)
@@ -343,7 +356,7 @@ class Commit(Messenger):
editor = svn.repos.ChangeCollector(repos.fs_ptr, repos.root_this, \
self.pool)
e_ptr, e_baton = svn.delta.make_editor(editor, self.pool)
- svn.repos.replay(repos.root_this, e_ptr, e_baton, self.pool)
+ svn.repos.replay2(repos.root_this, "", svn.core.SVN_INVALID_REVNUM, 1, e_ptr, e_baton, None, self.pool)
self.changelist = sorted(editor.get_changes().items())
diff --git a/tools/hook-scripts/mailer/tests/mailer-init.sh b/tools/hook-scripts/mailer/tests/mailer-init.sh
index f51b4c5..ef961b4 100755
--- a/tools/hook-scripts/mailer/tests/mailer-init.sh
+++ b/tools/hook-scripts/mailer/tests/mailer-init.sh
@@ -52,7 +52,7 @@ svn commit -m "initial load"
svn ps prop1 propval1 file1
echo change C1 >> file2
svn ps svn:keywords Id file2
-svn ps svn:new_svn_prop val file2
+svn ps --force svn:new_svn_prop val file2
svn ps prop1 propval1 file2
svn ps prop3 propval3 dir1
echo change C2 >> dir2/file5
@@ -103,6 +103,7 @@ svn commit -m "copy dir, then make a change"
# add a binary file and set property to binary value
echo -e "\x00\x01\x02\x03\x04" > file11
svn add file11
+svn ps svn:mime-type application/octect-stream file11
svn ps prop2 -F file11 file9
svn commit -m "add binary file"
diff --git a/tools/hook-scripts/mailer/tests/mailer-t1.output b/tools/hook-scripts/mailer/tests/mailer-t1.output
index c8f8701..10466ff 100644
--- a/tools/hook-scripts/mailer/tests/mailer-t1.output
+++ b/tools/hook-scripts/mailer/tests/mailer-t1.output
@@ -1,3 +1,30 @@
+Group: file
+Subject: r1 - dir1 dir2
+
+Author: mailer test
+Date: Sun Sep 9 01:46:40 2001
+New Revision: 1
+
+Log:
+initial load
+
+Added:
+ file1
+ file2
+
+Added: file1
+==============================================================================
+--- /dev/null 00:00:00 1970 (empty, because file is newly added)
++++ file1 Sun Sep 9 01:46:40 2001 (r1)
+@@ -0,0 +1 @@
++file1
+
+Added: file2
+==============================================================================
+--- /dev/null 00:00:00 1970 (empty, because file is newly added)
++++ file2 Sun Sep 9 01:46:40 2001 (r1)
+@@ -0,0 +1 @@
++file2
Group: file plus other areas
Subject: r1 - dir1 dir2
@@ -126,32 +153,26 @@ Added: file2
@@ -0,0 +1 @@
+file2
Group: file
-Subject: r1 - dir1 dir2
+Subject: r2 - dir1 dir2
Author: mailer test
-Date: Sun Sep 9 01:46:40 2001
-New Revision: 1
+Date: Sun Sep 9 04:33:20 2001
+New Revision: 2
Log:
-initial load
-
-Added:
- file1
- file2
+two file changes. Fixes Blah#123
-Added: file1
-==============================================================================
---- /dev/null 00:00:00 1970 (empty, because file is newly added)
-+++ file1 Sun Sep 9 01:46:40 2001 (r1)
-@@ -0,0 +1 @@
-+file1
+Modified:
+ file1 (props changed)
+ file2 (contents, props changed)
-Added: file2
+Modified: file2
==============================================================================
---- /dev/null 00:00:00 1970 (empty, because file is newly added)
-+++ file2 Sun Sep 9 01:46:40 2001 (r1)
-@@ -0,0 +1 @@
-+file2
+--- file2 Sun Sep 9 01:46:40 2001 (r1)
++++ file2 Sun Sep 9 04:33:20 2001 (r2)
+@@ -1 +1,2 @@
+ file2
++change C1
Group: bugtracker
Subject: Fix for Blah#123: r2 - dir1 dir2
@@ -250,27 +271,6 @@ Modified: dir2/file5
@@ -1 +1,2 @@
file5
+change C2
-Group: file
-Subject: r2 - dir1 dir2
-
-Author: mailer test
-Date: Sun Sep 9 04:33:20 2001
-New Revision: 2
-
-Log:
-two file changes. Fixes Blah#123
-
-Modified:
- file1 (props changed)
- file2 (contents, props changed)
-
-Modified: file2
-==============================================================================
---- file2 Sun Sep 9 01:46:40 2001 (r1)
-+++ file2 Sun Sep 9 04:33:20 2001 (r2)
-@@ -1 +1,2 @@
- file2
-+change C1
Group: All
Subject: r3 - dir2 dir3
@@ -314,7 +314,7 @@ Copied and modified: dir3/file8 (from r2, file1)
@@ -1 +1,2 @@
file1
+change C3
-Group: file plus other areas
+Group: file
Subject: r5 - dir1 dir3
Author: mailer test
@@ -326,12 +326,7 @@ changes and deletes of properties
Modified:
file2 (props changed)
-
-Changes in other areas also in this revision:
-Modified:
- dir1/ (props changed)
- dir3/ (props changed)
-Group: All
+Group: file plus other areas
Subject: r5 - dir1 dir3
Author: mailer test
@@ -342,10 +337,13 @@ Log:
changes and deletes of properties
Modified:
+ file2 (props changed)
+
+Changes in other areas also in this revision:
+Modified:
dir1/ (props changed)
dir3/ (props changed)
- file2 (props changed)
-Group: file
+Group: All
Subject: r5 - dir1 dir3
Author: mailer test
@@ -356,6 +354,8 @@ Log:
changes and deletes of properties
Modified:
+ dir1/ (props changed)
+ dir3/ (props changed)
file2 (props changed)
Group: file
Subject: r6 - dir1 dir4
@@ -411,8 +411,8 @@ Modified: dir1/file3
@@ -1 +1,2 @@
file3
+change C4
-Group: All
-Subject: r6 - dir1 dir4
+Group: bugtracker
+Subject: Fix for Blaz#456: r6 - dir1 dir4
Author: mailer test
Date: Sun Sep 9 15:40:00 2001
@@ -471,8 +471,8 @@ Added: file9
+++ file9 Sun Sep 9 15:40:00 2001 (r6)
@@ -0,0 +1 @@
+file9
-Group: bugtracker
-Subject: Fix for Blaz#456: r6 - dir1 dir4
+Group: All
+Subject: r6 - dir1 dir4
Author: mailer test
Date: Sun Sep 9 15:40:00 2001
@@ -501,6 +501,26 @@ Added: file9
+++ file9 Sun Sep 9 15:40:00 2001 (r6)
@@ -0,0 +1 @@
+file9
+Group: file
+Subject: r7 - dir1 dir2 dir3 dir3/dir5
+
+Author: mailer test
+Date: Sun Sep 9 18:26:40 2001
+New Revision: 7
+
+Log:
+adds, deletes, and a change
+
+Deleted:
+ file2
+
+Deleted: file2
+==============================================================================
+--- file2 Sun Sep 9 18:26:40 2001 (r6)
++++ /dev/null 00:00:00 1970 (deleted)
+@@ -1,2 +0,0 @@
+-file2
+-change C1
Group: file plus other areas
Subject: r7 - dir1 dir2 dir3 dir3/dir5
@@ -588,26 +608,6 @@ Deleted: file2
@@ -1,2 +0,0 @@
-file2
-change C1
-Group: file
-Subject: r7 - dir1 dir2 dir3 dir3/dir5
-
-Author: mailer test
-Date: Sun Sep 9 18:26:40 2001
-New Revision: 7
-
-Log:
-adds, deletes, and a change
-
-Deleted:
- file2
-
-Deleted: file2
-==============================================================================
---- file2 Sun Sep 9 18:26:40 2001 (r6)
-+++ /dev/null 00:00:00 1970 (deleted)
-@@ -1,2 +0,0 @@
--file2
--change C1
Group: All
Subject: r8 - in dir6: . dir5
@@ -644,7 +644,7 @@ Modified: dir6/file4
@@ -1 +1,2 @@
file4
+change C6
-Group: file plus other areas
+Group: file
Subject: r9 -
Author: mailer test
@@ -662,7 +662,7 @@ Modified:
Added: file11
==============================================================================
Binary file. No diff available.
-Group: All
+Group: file plus other areas
Subject: r9 -
Author: mailer test
@@ -680,7 +680,7 @@ Modified:
Added: file11
==============================================================================
Binary file. No diff available.
-Group: file
+Group: All
Subject: r9 -
Author: mailer test
@@ -698,7 +698,7 @@ Modified:
Added: file11
==============================================================================
Binary file. No diff available.
-Group: file plus other areas
+Group: file
Subject: r10 -
Author: mailer test
@@ -715,7 +715,7 @@ Modified:
Modified: file11
==============================================================================
Binary file (source and/or target). No diff available.
-Group: All
+Group: file plus other areas
Subject: r10 -
Author: mailer test
@@ -732,7 +732,7 @@ Modified:
Modified: file11
==============================================================================
Binary file (source and/or target). No diff available.
-Group: file
+Group: All
Subject: r10 -
Author: mailer test
diff --git a/tools/hook-scripts/persist-ephemeral-txnprops.py b/tools/hook-scripts/persist-ephemeral-txnprops.py
new file mode 100755
index 0000000..6e5697a
--- /dev/null
+++ b/tools/hook-scripts/persist-ephemeral-txnprops.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+import sys
+import os
+from svn import repos, fs, core
+
+def duplicate_ephemeral_txnprops(repos_path, txn_name):
+ fs_ptr = repos.fs(repos.open(repos_path))
+ txn_t = fs.open_txn(fs_ptr, txn_name)
+ for name, value in fs.txn_proplist(txn_t).items():
+ if name.startswith(core.SVN_PROP_TXN_PREFIX):
+ name = core.SVN_PROP_REVISION_PREFIX + \
+ name[len(core.SVN_PROP_TXN_PREFIX):]
+ fs.change_txn_prop(txn_t, name, value)
+
+def usage_and_exit(errmsg=None):
+ stream = errmsg and sys.stderr or sys.stdout
+ stream.write("""\
+Usage:
+
+ persist-ephemeral-txnprops.py REPOS_PATH TXN_NAME
+
+Duplicate ephemeral transaction properties so that the information
+they carry may persist as properties of the revision created once the
+transaction is committed. This is intended to be used as a Subversion
+pre-commit hook script.
+
+REPOS_PATH is the on-disk path of the repository whose transaction
+properties are being examined/modified. TXN_NAME is the name of the
+transaction.
+
+Ephemeral transaction properties, whose names all begin with the
+prefix "%s", will be copied to new properties which use the
+prefix "%s" instead.
+
+""" % (core.SVN_PROP_TXN_PREFIX, core.SVN_PROP_REVISION_PREFIX))
+ if errmsg:
+ stream.write("ERROR: " + errmsg + "\n")
+ sys.exit(errmsg and 1 or 0)
+
+def main():
+ argc = len(sys.argv)
+ if argc != 3:
+ usage_and_exit("Incorrect number of arguments.")
+ repos_path = sys.argv[1]
+ txn_name = sys.argv[2]
+ duplicate_ephemeral_txnprops(repos_path, txn_name)
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/hook-scripts/svn2feed.py b/tools/hook-scripts/svn2feed.py
index 0075cfc..b4ba2ac 100755
--- a/tools/hook-scripts/svn2feed.py
+++ b/tools/hook-scripts/svn2feed.py
@@ -70,7 +70,7 @@ Options:
# is actually set only on initial feed creation, and thereafter simply
# re-used from the pickle each time.
-# $HeadURL: http://svn.apache.org/repos/asf/subversion/branches/1.7.x/tools/hook-scripts/svn2feed.py $
+# $HeadURL: http://svn.apache.org/repos/asf/subversion/branches/1.8.x/tools/hook-scripts/svn2feed.py $
# $LastChangedDate: 2009-11-16 19:07:17 +0000 (Mon, 16 Nov 2009) $
# $LastChangedBy: hwright $
# $LastChangedRevision: 880911 $
diff --git a/tools/hook-scripts/svnperms.py b/tools/hook-scripts/svnperms.py
index 519e64e..14fbf7a 100755
--- a/tools/hook-scripts/svnperms.py
+++ b/tools/hook-scripts/svnperms.py
@@ -21,7 +21,7 @@
#
#
-# $HeadURL: http://svn.apache.org/repos/asf/subversion/branches/1.7.x/tools/hook-scripts/svnperms.py $
+# $HeadURL: http://svn.apache.org/repos/asf/subversion/branches/1.8.x/tools/hook-scripts/svnperms.py $
# $LastChangedDate: 2011-07-12 18:37:44 +0000 (Tue, 12 Jul 2011) $
# $LastChangedBy: blair $
# $LastChangedRevision: 1145712 $
diff --git a/tools/hook-scripts/validate-files.conf.example b/tools/hook-scripts/validate-files.conf.example
new file mode 100644
index 0000000..f37981f
--- /dev/null
+++ b/tools/hook-scripts/validate-files.conf.example
@@ -0,0 +1,69 @@
+# DEFAULT section can be used to place options that can be referenced in
+# other section values with the %(option)s syntax. Note that the svnlook
+# value below is required as it is used by the script to determine the path
+# to the svnlook command in order to determine the changes. Feel free
+# to create additional values here that you can reuse in other options,
+# especially the command options to make it easier to maintain.
+[DEFAULT]
+svnlook = /usr/local/bin/svnlook
+#svnauthz = /usr/local/bin/svn-tools/svnauthz
+#xmllint = /usr/bin/xmllint
+
+# The repositories section has key value pairs where the key is a pattern
+# to match on the repository path and the value is a space separated list of
+# rules to apply to that repository. Multiple patterns can match and all
+# unique rules will be applied. The pattern is a Unix shell-style wildcard.
+# As seen below all repositories will have the svnauthz-validate and xmllint
+# rules applied and repositories in /repos or below will have admin-rw-authz
+# applied.
+[repositories]
+#* = svnauthz-validate xmllint
+#/repos/* = admin-rw-authz
+
+# Rules allow you define a pattern to match against which files in the
+# repository to run a command against. Rules are defined by creating a
+# section name starting with 'rule:' as seen below.
+#
+# The pattern option is a Unix shell-style wildcard match against the
+# files in the repo that the rule will be run for. A leading / in your
+# pattern will be ignored. Paths segments are / separated regardless of
+# platform.
+#
+# The command option is the command to run, this command will be run via
+# the shell of your platform. The following environment variables will
+# be defined for you:
+# REPO = the path of the repository for the commit.
+# TXN = the transaction id of the commit.
+# FILE = the name of the file that matched the pattern.
+#
+# IMPORTANT: AS A CONSEQUENCE OF THE USE OF THE SHELL IT IS IMPORTANT TO
+# QUOTE THE ARGUMENTS OF YOUR COMMANDS. THE FILE VARIABLE DOES CONTAIN
+# USER GENERATED DATA AND SHELL METACHARACTERS ARE NOT ESCAPED FOR YOU!
+#
+# The following examples assume a POSIX shell, if your platform has a
+# different shell you may need to adjust them. For example on Windows
+# cmd.exe uses %VARIABLENAME% instead of $VARIABLENAME to expand environment
+# variables.
+#
+# The following rule runs the svnauthz command's validate subcommand
+# for file named authz in the conf subdir if it is present in the commit.
+# This is a simple way to ensure that invalid authz files are not allowed
+# to be committed.
+#[rule:svnauthz-validate]
+#pattern = conf/authz
+#command = '%(svnauthz)s' validate -t "$TXN" "$REPO" "$FILE"
+
+# The following rule runs the svnauthz command's accessof subcommand
+# for any file ending in .authz for the conf subdir and checks that the admin
+# user has rw rights to the same file. This can be used to prevent an
+# authz file being committed that would remove access for the admin user.
+# Note that accessof also validates the validity of the file as well as
+# checking the permissions, so it's unecessary to run validate and accessof.
+#[rule:admin-rw-authz]
+#pattern = /conf/*.authz
+#command = '%(svnauthz)s' accessof --username admin --path "$FILE" --is rw -t "$TXN" "$REPO" "$FILE"
+
+# Use the xmllint command to validate all files ending in .xml
+#[rule:xmllint]
+#pattern = *.xml
+#command = '%(svnlook)s' cat -t "$TXN" "$REPO" "$FILE" | '%(xmllint)s' --noout -
diff --git a/tools/hook-scripts/validate-files.py b/tools/hook-scripts/validate-files.py
new file mode 100755
index 0000000..7169251
--- /dev/null
+++ b/tools/hook-scripts/validate-files.py
@@ -0,0 +1,159 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Subversion pre-commit hook script that runs user configured commands
+to validate files in the commit and reject the commit if the commands
+exit with a non-zero exit code. The script expects a validate-files.conf
+file placed in the conf dir under the repo the commit is for."""
+
+import sys
+import os
+import subprocess
+import fnmatch
+
+# Deal with the rename of ConfigParser to configparser in Python3
+try:
+ # Python >= 3.0
+ import configparser
+except ImportError:
+ # Python < 3.0
+ import ConfigParser as configparser
+
+class Config(configparser.SafeConfigParser):
+ """Superclass of SafeConfigParser with some customizations
+ for this script"""
+ def optionxform(self, option):
+ """Redefine optionxform so option names are case sensitive"""
+ return option
+
+ def getlist(self, section, option):
+ """Returns value of option as a list using whitespace to
+ split entries"""
+ value = self.get(section, option)
+ if value:
+ return value.split()
+ else:
+ return None
+
+ def get_matching_rules(self, repo):
+ """Return list of unique rules names that apply to a given repo"""
+ rules = {}
+ for option in self.options('repositories'):
+ if fnmatch.fnmatch(repo, option):
+ for rule in self.getlist('repositories', option):
+ rules[rule] = True
+ return rules.keys()
+
+ def get_rule_section_name(self, rule):
+ """Given a rule name provide the section name it is defined in."""
+ return 'rule:%s' % (rule)
+
+class Commands:
+ """Class to handle logic of running commands"""
+ def __init__(self, config):
+ self.config = config
+
+ def svnlook_changed(self, repo, txn):
+ """Provide list of files changed in txn of repo"""
+ svnlook = self.config.get('DEFAULT', 'svnlook')
+ cmd = "'%s' changed -t '%s' '%s'" % (svnlook, txn, repo)
+ p = subprocess.Popen(cmd, shell=True,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ changed = []
+ while True:
+ line = p.stdout.readline()
+ if not line:
+ break
+ line = line.decode().strip()
+ text_mod = line[0:1]
+ # Only if the contents of the file changed (by addition or update)
+ # directories always end in / in the svnlook changed output
+ if line[-1] != "/" and (text_mod == "A" or text_mod == "U"):
+ changed.append(line[4:])
+
+ # wait on the command to finish so we can get the
+ # returncode/stderr output
+ data = p.communicate()
+ if p.returncode != 0:
+ sys.stderr.write(data[1].decode())
+ sys.exit(2)
+
+ return changed
+
+ def user_command(self, section, repo, txn, fn):
+ """ Run the command defined for a given section.
+ Replaces $REPO, $TXN and $FILE with the repo, txn and fn arguments
+ in the defined command.
+
+ Returns a tuple of the exit code and the stderr output of the command"""
+ cmd = self.config.get(section, 'command')
+ cmd_env = os.environ.copy()
+ cmd_env['REPO'] = repo
+ cmd_env['TXN'] = txn
+ cmd_env['FILE'] = fn
+ p = subprocess.Popen(cmd, shell=True, env=cmd_env, stderr=subprocess.PIPE)
+ data = p.communicate()
+ return (p.returncode, data[1].decode())
+
+def main(repo, txn):
+ exitcode = 0
+ config = Config()
+ config.read(os.path.join(repo, 'conf', 'validate-files.conf'))
+ commands = Commands(config)
+
+ rules = config.get_matching_rules(repo)
+
+ # no matching rules so nothing to do
+ if len(rules) == 0:
+ sys.exit(0)
+
+ changed = commands.svnlook_changed(repo, txn)
+ # this shouldn't ever happen
+ if len(changed) == 0:
+ sys.exit(0)
+
+ for rule in rules:
+ section = config.get_rule_section_name(rule)
+ pattern = config.get(section, 'pattern')
+
+ # skip leading slashes if present in the pattern
+ if pattern[0] == '/': pattern = pattern[1:]
+
+ for fn in fnmatch.filter(changed, pattern):
+ (returncode, err_mesg) = commands.user_command(section, repo,
+ txn, fn)
+ if returncode != 0:
+ sys.stderr.write(
+ "\nError validating file '%s' with rule '%s' " \
+ "(exit code %d):\n" % (fn, rule, returncode))
+ sys.stderr.write(err_mesg)
+ exitcode = 1
+
+ return exitcode
+
+if __name__ == "__main__":
+ if len(sys.argv) != 3:
+ sys.stderr.write("invalid args\n")
+ sys.exit(0)
+
+ try:
+ sys.exit(main(sys.argv[1], sys.argv[2]))
+ except configparser.Error as e:
+ sys.stderr.write("Error with the validate-files.conf: %s\n" % e)
+ sys.exit(2)
diff --git a/tools/server-side/fsfs-reshard.py b/tools/server-side/fsfs-reshard.py
index d039885..16d2fcd 100755
--- a/tools/server-side/fsfs-reshard.py
+++ b/tools/server-side/fsfs-reshard.py
@@ -46,7 +46,7 @@
# under the License.
# ====================================================================
#
-# $HeadURL: http://svn.apache.org/repos/asf/subversion/branches/1.7.x/tools/server-side/fsfs-reshard.py $
+# $HeadURL: http://svn.apache.org/repos/asf/subversion/branches/1.8.x/tools/server-side/fsfs-reshard.py $
# $LastChangedDate: 2009-11-16 19:07:17 +0000 (Mon, 16 Nov 2009) $
# $LastChangedBy: hwright $
# $LastChangedRevision: 880911 $
diff --git a/tools/server-side/fsfs-stats.c b/tools/server-side/fsfs-stats.c
new file mode 100644
index 0000000..80a09f9
--- /dev/null
+++ b/tools/server-side/fsfs-stats.c
@@ -0,0 +1,2181 @@
+/* fsfs-stats.c -- gather size statistics on FSFS repositories
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+
+#include <assert.h>
+
+#include <apr.h>
+#include <apr_general.h>
+#include <apr_file_io.h>
+#include <apr_poll.h>
+
+#include "svn_pools.h"
+#include "svn_diff.h"
+#include "svn_io.h"
+#include "svn_utf.h"
+#include "svn_dirent_uri.h"
+#include "svn_sorts.h"
+#include "svn_delta.h"
+#include "svn_hash.h"
+#include "svn_cache_config.h"
+
+#include "private/svn_string_private.h"
+#include "private/svn_subr_private.h"
+#include "private/svn_dep_compat.h"
+#include "private/svn_cache.h"
+
+#ifndef _
+#define _(x) x
+#endif
+
+#define ERROR_TAG "fsfs-stats: "
+
+/* We group representations into 2x2 different kinds plus one default:
+ * [dir / file] x [text / prop]. The assignment is done by the first node
+ * that references the respective representation.
+ */
+typedef enum rep_kind_t
+{
+ /* The representation is _directly_ unused, i.e. not referenced by any
+ * noderev. However, some other representation may use it as delta base.
+ * null value. Should not occur in real-word repositories. */
+ unused_rep,
+
+ /* a properties on directory rep */
+ dir_property_rep,
+
+ /* a properties on file rep */
+ file_property_rep,
+
+ /* a directory rep */
+ dir_rep,
+
+ /* a file rep */
+ file_rep
+} rep_kind_t;
+
+/* A representation fragment.
+ */
+typedef struct representation_t
+{
+ /* absolute offset in the file */
+ apr_size_t offset;
+
+ /* item length in bytes */
+ apr_size_t size;
+
+ /* item length after de-deltification */
+ apr_size_t expanded_size;
+
+ /* deltification base, or NULL if there is none */
+ struct representation_t *delta_base;
+
+ /* revision that contains this representation
+ * (may be referenced by other revisions, though) */
+ svn_revnum_t revision;
+
+ /* number of nodes that reference this representation */
+ apr_uint32_t ref_count;
+
+ /* length of the PLAIN / DELTA line in the source file in bytes */
+ apr_uint16_t header_size;
+
+ /* classification of the representation. values of rep_kind_t */
+ char kind;
+
+ /* the source content has a PLAIN header, so we may simply copy the
+ * source content into the target */
+ char is_plain;
+
+} representation_t;
+
+/* Represents a single revision.
+ * There will be only one instance per revision. */
+typedef struct revision_info_t
+{
+ /* number of this revision */
+ svn_revnum_t revision;
+
+ /* pack file offset (manifest value), 0 for non-packed files */
+ apr_size_t offset;
+
+ /* offset of the changes list relative to OFFSET */
+ apr_size_t changes;
+
+ /* length of the changes list on bytes */
+ apr_size_t changes_len;
+
+ /* offset of the changes list relative to OFFSET */
+ apr_size_t change_count;
+
+ /* first offset behind the revision data in the pack file (file length
+ * for non-packed revs) */
+ apr_size_t end;
+
+ /* number of directory noderevs in this revision */
+ apr_size_t dir_noderev_count;
+
+ /* number of file noderevs in this revision */
+ apr_size_t file_noderev_count;
+
+ /* total size of directory noderevs (i.e. the structs - not the rep) */
+ apr_size_t dir_noderev_size;
+
+ /* total size of file noderevs (i.e. the structs - not the rep) */
+ apr_size_t file_noderev_size;
+
+ /* all representation_t of this revision (in no particular order),
+ * i.e. those that point back to this struct */
+ apr_array_header_t *representations;
+} revision_info_t;
+
+/* Data type to identify a representation. It will be used to address
+ * cached combined (un-deltified) windows.
+ */
+typedef struct window_cache_key_t
+{
+ /* revision of the representation */
+ svn_revnum_t revision;
+
+ /* its offset */
+ apr_size_t offset;
+} window_cache_key_t;
+
+/* Description of one large representation. It's content will be reused /
+ * overwritten when it gets replaced by an even larger representation.
+ */
+typedef struct large_change_info_t
+{
+ /* size of the (deltified) representation */
+ apr_size_t size;
+
+ /* revision of the representation */
+ svn_revnum_t revision;
+
+ /* node path. "" for unused instances */
+ svn_stringbuf_t *path;
+} large_change_info_t;
+
+/* Container for the largest representations found so far. The capacity
+ * is fixed and entries will be inserted by reusing the last one and
+ * reshuffling the entry pointers.
+ */
+typedef struct largest_changes_t
+{
+ /* number of entries allocated in CHANGES */
+ apr_size_t count;
+
+ /* size of the smallest change */
+ apr_size_t min_size;
+
+ /* changes kept in this struct */
+ large_change_info_t **changes;
+} largest_changes_t;
+
+/* Information we gather per size bracket.
+ */
+typedef struct histogram_line_t
+{
+ /* number of item that fall into this bracket */
+ apr_int64_t count;
+
+ /* sum of values in this bracket */
+ apr_int64_t sum;
+} histogram_line_t;
+
+/* A histogram of 64 bit integer values.
+ */
+typedef struct histogram_t
+{
+ /* total sum over all brackets */
+ histogram_line_t total;
+
+ /* one bracket per binary step.
+ * line[i] is the 2^(i-1) <= x < 2^i bracket */
+ histogram_line_t lines[64];
+} histogram_t;
+
+/* Information we collect per file ending.
+ */
+typedef struct extension_info_t
+{
+ /* file extension, including leading "."
+ * "(none)" in the container for files w/o extension. */
+ const char *extension;
+
+ /* histogram of representation sizes */
+ histogram_t rep_histogram;
+
+ /* histogram of sizes of changed files */
+ histogram_t node_histogram;
+} extension_info_t;
+
+/* Root data structure containing all information about a given repository.
+ */
+typedef struct fs_fs_t
+{
+ /* repository to reorg */
+ const char *path;
+
+ /* revision to start at (must be 0, ATM) */
+ svn_revnum_t start_revision;
+
+ /* FSFS format number */
+ int format;
+
+ /* highest revision number in the repo */
+ svn_revnum_t max_revision;
+
+ /* first non-packed revision */
+ svn_revnum_t min_unpacked_rev;
+
+ /* sharing size*/
+ int max_files_per_dir;
+
+ /* all revisions */
+ apr_array_header_t *revisions;
+
+ /* empty representation.
+ * Used as a dummy base for DELTA reps without base. */
+ representation_t *null_base;
+
+ /* undeltified txdelta window cache */
+ svn_cache__t *window_cache;
+
+ /* track the biggest contributors to repo size */
+ largest_changes_t *largest_changes;
+
+ /* history of representation sizes */
+ histogram_t rep_size_histogram;
+
+ /* history of sizes of changed nodes */
+ histogram_t node_size_histogram;
+
+ /* history of unused representations */
+ histogram_t unused_rep_histogram;
+
+ /* history of sizes of changed files */
+ histogram_t file_histogram;
+
+ /* history of sizes of file representations */
+ histogram_t file_rep_histogram;
+
+ /* history of sizes of changed file property sets */
+ histogram_t file_prop_histogram;
+
+ /* history of sizes of file property representations */
+ histogram_t file_prop_rep_histogram;
+
+ /* history of sizes of changed directories (in bytes) */
+ histogram_t dir_histogram;
+
+ /* history of sizes of directories representations */
+ histogram_t dir_rep_histogram;
+
+ /* history of sizes of changed directories property sets */
+ histogram_t dir_prop_histogram;
+
+ /* history of sizes of directories property representations */
+ histogram_t dir_prop_rep_histogram;
+
+ /* extension -> extension_info_t* map */
+ apr_hash_t *by_extension;
+} fs_fs_t;
+
+/* Return the rev pack folder for revision REV in FS.
+ */
+static const char *
+get_pack_folder(fs_fs_t *fs,
+ svn_revnum_t rev,
+ apr_pool_t *pool)
+{
+ return apr_psprintf(pool, "%s/db/revs/%ld.pack",
+ fs->path, rev / fs->max_files_per_dir);
+}
+
+/* Return the path of the file containing revision REV in FS.
+ */
+static const char *
+rev_or_pack_file_name(fs_fs_t *fs,
+ svn_revnum_t rev,
+ apr_pool_t *pool)
+{
+ return fs->min_unpacked_rev > rev
+ ? svn_dirent_join(get_pack_folder(fs, rev, pool), "pack", pool)
+ : apr_psprintf(pool, "%s/db/revs/%ld/%ld", fs->path,
+ rev / fs->max_files_per_dir, rev);
+}
+
+/* Open the file containing revision REV in FS and return it in *FILE.
+ */
+static svn_error_t *
+open_rev_or_pack_file(apr_file_t **file,
+ fs_fs_t *fs,
+ svn_revnum_t rev,
+ apr_pool_t *pool)
+{
+ return svn_io_file_open(file,
+ rev_or_pack_file_name(fs, rev, pool),
+ APR_READ | APR_BUFFERED,
+ APR_OS_DEFAULT,
+ pool);
+}
+
+/* Return the length of FILE in *FILE_SIZE. Use POOL for allocations.
+*/
+static svn_error_t *
+get_file_size(apr_off_t *file_size,
+ apr_file_t *file,
+ apr_pool_t *pool)
+{
+ apr_finfo_t finfo;
+
+ SVN_ERR(svn_io_file_info_get(&finfo, APR_FINFO_SIZE, file, pool));
+
+ *file_size = finfo.size;
+ return SVN_NO_ERROR;
+}
+
+/* Get the file content of revision REVISION in FS and return it in *CONTENT.
+ * Read the LEN bytes starting at file OFFSET. When provided, use FILE as
+ * packed or plain rev file.
+ * Use POOL for temporary allocations.
+ */
+static svn_error_t *
+get_content(svn_stringbuf_t **content,
+ apr_file_t *file,
+ fs_fs_t *fs,
+ svn_revnum_t revision,
+ apr_off_t offset,
+ apr_size_t len,
+ apr_pool_t *pool)
+{
+ apr_pool_t * file_pool = svn_pool_create(pool);
+ apr_size_t large_buffer_size = 0x10000;
+
+ if (file == NULL)
+ SVN_ERR(open_rev_or_pack_file(&file, fs, revision, file_pool));
+
+ *content = svn_stringbuf_create_ensure(len, pool);
+ (*content)->len = len;
+
+#if APR_VERSION_AT_LEAST(1,3,0)
+ /* for better efficiency use larger buffers on large reads */
+ if ( (len >= large_buffer_size)
+ && (apr_file_buffer_size_get(file) < large_buffer_size))
+ apr_file_buffer_set(file,
+ apr_palloc(apr_file_pool_get(file),
+ large_buffer_size),
+ large_buffer_size);
+#endif
+
+ SVN_ERR(svn_io_file_seek(file, APR_SET, &offset, pool));
+ SVN_ERR(svn_io_file_read_full2(file, (*content)->data, len,
+ NULL, NULL, pool));
+ svn_pool_destroy(file_pool);
+
+ return SVN_NO_ERROR;
+}
+
+/* In *RESULT, return the cached txdelta window stored in REPRESENTATION
+ * within FS. If that has not been found in cache, return NULL.
+ * Allocate the result in POOL.
+ */
+static svn_error_t *
+get_cached_window(svn_stringbuf_t **result,
+ fs_fs_t *fs,
+ representation_t *representation,
+ apr_pool_t *pool)
+{
+ svn_boolean_t found = FALSE;
+ window_cache_key_t key;
+ key.revision = representation->revision;
+ key.offset = representation->offset;
+
+ *result = NULL;
+ return svn_error_trace(svn_cache__get((void**)result, &found,
+ fs->window_cache,
+ &key, pool));
+}
+
+/* Cache the undeltified txdelta WINDOW for REPRESENTATION within FS.
+ * Use POOL for temporaries.
+ */
+static svn_error_t *
+set_cached_window(fs_fs_t *fs,
+ representation_t *representation,
+ svn_stringbuf_t *window,
+ apr_pool_t *pool)
+{
+ /* select entry */
+ window_cache_key_t key;
+ key.revision = representation->revision;
+ key.offset = representation->offset;
+
+ return svn_error_trace(svn_cache__set(fs->window_cache, &key, window,
+ pool));
+}
+
+/* Initialize the LARGEST_CHANGES member in FS with a capacity of COUNT
+ * entries. Use POOL for allocations.
+ */
+static void
+initialize_largest_changes(fs_fs_t *fs,
+ apr_size_t count,
+ apr_pool_t *pool)
+{
+ apr_size_t i;
+
+ fs->largest_changes = apr_pcalloc(pool, sizeof(*fs->largest_changes));
+ fs->largest_changes->count = count;
+ fs->largest_changes->min_size = 1;
+ fs->largest_changes->changes
+ = apr_palloc(pool, count * sizeof(*fs->largest_changes->changes));
+
+ /* allocate *all* entries before the path stringbufs. This increases
+ * cache locality and enhances performance significantly. */
+ for (i = 0; i < count; ++i)
+ fs->largest_changes->changes[i]
+ = apr_palloc(pool, sizeof(**fs->largest_changes->changes));
+
+ /* now initialize them and allocate the stringbufs */
+ for (i = 0; i < count; ++i)
+ {
+ fs->largest_changes->changes[i]->size = 0;
+ fs->largest_changes->changes[i]->revision = SVN_INVALID_REVNUM;
+ fs->largest_changes->changes[i]->path
+ = svn_stringbuf_create_ensure(1024, pool);
+ }
+}
+
+/* Add entry for SIZE to HISTOGRAM.
+ */
+static void
+add_to_histogram(histogram_t *histogram,
+ apr_int64_t size)
+{
+ apr_int64_t shift = 0;
+
+ while (((apr_int64_t)(1) << shift) <= size)
+ shift++;
+
+ histogram->total.count++;
+ histogram->total.sum += size;
+ histogram->lines[(apr_size_t)shift].count++;
+ histogram->lines[(apr_size_t)shift].sum += size;
+}
+
+/* Update data aggregators in FS with this representation of type KIND, on-
+ * disk REP_SIZE and expanded node size EXPANDED_SIZE for PATH in REVSION.
+ */
+static void
+add_change(fs_fs_t *fs,
+ apr_int64_t rep_size,
+ apr_int64_t expanded_size,
+ svn_revnum_t revision,
+ const char *path,
+ rep_kind_t kind)
+{
+ /* identify largest reps */
+ if (rep_size >= fs->largest_changes->min_size)
+ {
+ apr_size_t i;
+ large_change_info_t *info
+ = fs->largest_changes->changes[fs->largest_changes->count - 1];
+ info->size = rep_size;
+ info->revision = revision;
+ svn_stringbuf_set(info->path, path);
+
+ /* linear insertion but not too bad since count is low and insertions
+ * near the end are more likely than close to front */
+ for (i = fs->largest_changes->count - 1; i > 0; --i)
+ if (fs->largest_changes->changes[i-1]->size >= rep_size)
+ break;
+ else
+ fs->largest_changes->changes[i] = fs->largest_changes->changes[i-1];
+
+ fs->largest_changes->changes[i] = info;
+ fs->largest_changes->min_size
+ = fs->largest_changes->changes[fs->largest_changes->count-1]->size;
+ }
+
+ /* global histograms */
+ add_to_histogram(&fs->rep_size_histogram, rep_size);
+ add_to_histogram(&fs->node_size_histogram, expanded_size);
+
+ /* specific histograms by type */
+ switch (kind)
+ {
+ case unused_rep: add_to_histogram(&fs->unused_rep_histogram,
+ rep_size);
+ break;
+ case dir_property_rep: add_to_histogram(&fs->dir_prop_rep_histogram,
+ rep_size);
+ add_to_histogram(&fs->dir_prop_histogram,
+ expanded_size);
+ break;
+ case file_property_rep: add_to_histogram(&fs->file_prop_rep_histogram,
+ rep_size);
+ add_to_histogram(&fs->file_prop_histogram,
+ expanded_size);
+ break;
+ case dir_rep: add_to_histogram(&fs->dir_rep_histogram,
+ rep_size);
+ add_to_histogram(&fs->dir_histogram,
+ expanded_size);
+ break;
+ case file_rep: add_to_histogram(&fs->file_rep_histogram,
+ rep_size);
+ add_to_histogram(&fs->file_histogram,
+ expanded_size);
+ break;
+ }
+
+ /* by extension */
+ if (kind == file_rep)
+ {
+ /* determine extension */
+ extension_info_t *info;
+ const char * file_name = strrchr(path, '/');
+ const char * extension = file_name ? strrchr(file_name, '.') : NULL;
+
+ if (extension == NULL || extension == file_name + 1)
+ extension = "(none)";
+
+ /* get / auto-insert entry for this extension */
+ info = apr_hash_get(fs->by_extension, extension, APR_HASH_KEY_STRING);
+ if (info == NULL)
+ {
+ apr_pool_t *pool = apr_hash_pool_get(fs->by_extension);
+ info = apr_pcalloc(pool, sizeof(*info));
+ info->extension = apr_pstrdup(pool, extension);
+
+ apr_hash_set(fs->by_extension, info->extension,
+ APR_HASH_KEY_STRING, info);
+ }
+
+ /* update per-extension histogram */
+ add_to_histogram(&info->node_histogram, expanded_size);
+ add_to_histogram(&info->rep_histogram, rep_size);
+ }
+}
+
+/* Given rev pack PATH in FS, read the manifest file and return the offsets
+ * in *MANIFEST. Use POOL for allocations.
+ */
+static svn_error_t *
+read_manifest(apr_array_header_t **manifest,
+ fs_fs_t *fs,
+ const char *path,
+ apr_pool_t *pool)
+{
+ svn_stream_t *manifest_stream;
+ apr_pool_t *iterpool;
+
+ /* Open the manifest file. */
+ SVN_ERR(svn_stream_open_readonly(&manifest_stream,
+ svn_dirent_join(path, "manifest", pool),
+ pool, pool));
+
+ /* While we're here, let's just read the entire manifest file into an array,
+ so we can cache the entire thing. */
+ iterpool = svn_pool_create(pool);
+ *manifest = apr_array_make(pool, fs->max_files_per_dir, sizeof(apr_size_t));
+ while (1)
+ {
+ svn_stringbuf_t *sb;
+ svn_boolean_t eof;
+ apr_uint64_t val;
+ svn_error_t *err;
+
+ svn_pool_clear(iterpool);
+ SVN_ERR(svn_stream_readline(manifest_stream, &sb, "\n", &eof, iterpool));
+ if (eof)
+ break;
+
+ err = svn_cstring_strtoui64(&val, sb->data, 0, APR_SIZE_MAX, 10);
+ if (err)
+ return svn_error_createf(SVN_ERR_FS_CORRUPT, err,
+ _("Manifest offset '%s' too large"),
+ sb->data);
+ APR_ARRAY_PUSH(*manifest, apr_size_t) = (apr_size_t)val;
+ }
+ svn_pool_destroy(iterpool);
+
+ return svn_stream_close(manifest_stream);
+}
+
+/* Read header information for the revision stored in FILE_CONTENT (one
+ * whole revision). Return the offsets within FILE_CONTENT for the
+ * *ROOT_NODEREV, the list of *CHANGES and its len in *CHANGES_LEN.
+ * Use POOL for temporary allocations. */
+static svn_error_t *
+read_revision_header(apr_size_t *changes,
+ apr_size_t *changes_len,
+ apr_size_t *root_noderev,
+ svn_stringbuf_t *file_content,
+ apr_pool_t *pool)
+{
+ char buf[64];
+ const char *line;
+ char *space;
+ apr_uint64_t val;
+ apr_size_t len;
+
+ /* Read in this last block, from which we will identify the last line. */
+ len = sizeof(buf);
+ if (len > file_content->len)
+ len = file_content->len;
+
+ memcpy(buf, file_content->data + file_content->len - len, len);
+
+ /* The last byte should be a newline. */
+ if (buf[(apr_ssize_t)len - 1] != '\n')
+ return svn_error_create(SVN_ERR_FS_CORRUPT, NULL,
+ _("Revision lacks trailing newline"));
+
+ /* Look for the next previous newline. */
+ buf[len - 1] = 0;
+ line = strrchr(buf, '\n');
+ if (line == NULL)
+ return svn_error_create(SVN_ERR_FS_CORRUPT, NULL,
+ _("Final line in revision file longer "
+ "than 64 characters"));
+
+ space = strchr(line, ' ');
+ if (space == NULL)
+ return svn_error_create(SVN_ERR_FS_CORRUPT, NULL,
+ _("Final line in revision file missing space"));
+
+ /* terminate the header line */
+ *space = 0;
+
+ /* extract information */
+ SVN_ERR(svn_cstring_strtoui64(&val, line+1, 0, APR_SIZE_MAX, 10));
+ *root_noderev = (apr_size_t)val;
+ SVN_ERR(svn_cstring_strtoui64(&val, space+1, 0, APR_SIZE_MAX, 10));
+ *changes = (apr_size_t)val;
+ *changes_len = file_content->len - *changes - (buf + len - line) + 1;
+
+ return SVN_NO_ERROR;
+}
+
+/* Read the FSFS format number and sharding size from the format file at
+ * PATH and return it in *PFORMAT and *MAX_FILES_PER_DIR respectively.
+ * Use POOL for temporary allocations.
+ */
+static svn_error_t *
+read_format(int *pformat, int *max_files_per_dir,
+ const char *path, apr_pool_t *pool)
+{
+ svn_error_t *err;
+ apr_file_t *file;
+ char buf[80];
+ apr_size_t len;
+
+ /* open format file and read the first line */
+ err = svn_io_file_open(&file, path, APR_READ | APR_BUFFERED,
+ APR_OS_DEFAULT, pool);
+ if (err && APR_STATUS_IS_ENOENT(err->apr_err))
+ {
+ /* Treat an absent format file as format 1. Do not try to
+ create the format file on the fly, because the repository
+ might be read-only for us, or this might be a read-only
+ operation, and the spirit of FSFS is to make no changes
+ whatseover in read-only operations. See thread starting at
+ http://subversion.tigris.org/servlets/ReadMsg?list=dev&msgNo=97600
+ for more. */
+ svn_error_clear(err);
+ *pformat = 1;
+ *max_files_per_dir = 0;
+
+ return SVN_NO_ERROR;
+ }
+ SVN_ERR(err);
+
+ len = sizeof(buf);
+ err = svn_io_read_length_line(file, buf, &len, pool);
+ if (err && APR_STATUS_IS_EOF(err->apr_err))
+ {
+ /* Return a more useful error message. */
+ svn_error_clear(err);
+ return svn_error_createf(SVN_ERR_BAD_VERSION_FILE_FORMAT, NULL,
+ _("Can't read first line of format file '%s'"),
+ svn_dirent_local_style(path, pool));
+ }
+ SVN_ERR(err);
+
+ /* Check that the first line contains only digits. */
+ SVN_ERR(svn_cstring_atoi(pformat, buf));
+
+ /* Set the default values for anything that can be set via an option. */
+ *max_files_per_dir = 0;
+
+ /* Read any options. */
+ while (1)
+ {
+ len = sizeof(buf);
+ err = svn_io_read_length_line(file, buf, &len, pool);
+ if (err && APR_STATUS_IS_EOF(err->apr_err))
+ {
+ /* No more options; that's okay. */
+ svn_error_clear(err);
+ break;
+ }
+ SVN_ERR(err);
+
+ if (strncmp(buf, "layout ", 7) == 0)
+ {
+ if (strcmp(buf+7, "linear") == 0)
+ {
+ *max_files_per_dir = 0;
+ continue;
+ }
+
+ if (strncmp(buf+7, "sharded ", 8) == 0)
+ {
+ /* Check that the argument is numeric. */
+ SVN_ERR(svn_cstring_atoi(max_files_per_dir, buf + 15));
+ continue;
+ }
+ }
+
+ return svn_error_createf(SVN_ERR_BAD_VERSION_FILE_FORMAT, NULL,
+ _("'%s' contains invalid filesystem format option '%s'"),
+ svn_dirent_local_style(path, pool), buf);
+ }
+
+ return svn_io_file_close(file, pool);
+}
+
+/* Read the content of the file at PATH and return it in *RESULT.
+ * Use POOL for temporary allocations.
+ */
+static svn_error_t *
+read_number(svn_revnum_t *result, const char *path, apr_pool_t *pool)
+{
+ svn_stringbuf_t *content;
+ apr_uint64_t number;
+
+ SVN_ERR(svn_stringbuf_from_file2(&content, path, pool));
+
+ content->data[content->len-1] = 0;
+ SVN_ERR(svn_cstring_strtoui64(&number, content->data, 0, LONG_MAX, 10));
+ *result = (svn_revnum_t)number;
+
+ return SVN_NO_ERROR;
+}
+
+/* Create *FS for the repository at PATH and read the format and size info.
+ * Use POOL for temporary allocations.
+ */
+static svn_error_t *
+fs_open(fs_fs_t **fs, const char *path, apr_pool_t *pool)
+{
+ *fs = apr_pcalloc(pool, sizeof(**fs));
+ (*fs)->path = apr_pstrdup(pool, path);
+ (*fs)->max_files_per_dir = 1000;
+
+ /* Read the FS format number. */
+ SVN_ERR(read_format(&(*fs)->format,
+ &(*fs)->max_files_per_dir,
+ svn_dirent_join(path, "db/format", pool),
+ pool));
+ if (((*fs)->format != 4) && ((*fs)->format != 6))
+ return svn_error_create(SVN_ERR_FS_UNSUPPORTED_FORMAT, NULL, NULL);
+
+ /* read size (HEAD) info */
+ SVN_ERR(read_number(&(*fs)->min_unpacked_rev,
+ svn_dirent_join(path, "db/min-unpacked-rev", pool),
+ pool));
+ return read_number(&(*fs)->max_revision,
+ svn_dirent_join(path, "db/current", pool),
+ pool);
+}
+
+/* Utility function that returns true if STRING->DATA matches KEY.
+ */
+static svn_boolean_t
+key_matches(svn_string_t *string, const char *key)
+{
+ return strcmp(string->data, key) == 0;
+}
+
+/* Comparator used for binary search comparing the absolute file offset
+ * of a representation to some other offset. DATA is a *representation_t,
+ * KEY is a pointer to an apr_size_t.
+ */
+static int
+compare_representation_offsets(const void *data, const void *key)
+{
+ apr_ssize_t diff = (*(const representation_t *const *)data)->offset
+ - *(const apr_size_t *)key;
+
+ /* sizeof(int) may be < sizeof(ssize_t) */
+ if (diff < 0)
+ return -1;
+ return diff > 0 ? 1 : 0;
+}
+
+/* Find the revision_info_t object to the given REVISION in FS and return
+ * it in *REVISION_INFO. For performance reasons, we skip the lookup if
+ * the info is already provided.
+ *
+ * In that revision, look for the representation_t object for offset OFFSET.
+ * If it already exists, set *IDX to its index in *REVISION_INFO's
+ * representations list and return the representation object. Otherwise,
+ * set the index to where it must be inserted and return NULL.
+ */
+static representation_t *
+find_representation(int *idx,
+ fs_fs_t *fs,
+ revision_info_t **revision_info,
+ svn_revnum_t revision,
+ apr_size_t offset)
+{
+ revision_info_t *info;
+ *idx = -1;
+
+ /* first let's find the revision */
+ info = revision_info ? *revision_info : NULL;
+ if (info == NULL || info->revision != revision)
+ {
+ info = APR_ARRAY_IDX(fs->revisions,
+ revision - fs->start_revision,
+ revision_info_t*);
+ if (revision_info)
+ *revision_info = info;
+ }
+
+ /* not found -> no result */
+ if (info == NULL)
+ return NULL;
+
+ assert(revision == info->revision);
+
+ /* look for the representation */
+ *idx = svn_sort__bsearch_lower_bound(&offset,
+ info->representations,
+ compare_representation_offsets);
+ if (*idx < info->representations->nelts)
+ {
+ /* return the representation, if this is the one we were looking for */
+ representation_t *result
+ = APR_ARRAY_IDX(info->representations, *idx, representation_t *);
+ if (result->offset == offset)
+ return result;
+ }
+
+ /* not parsed, yet */
+ return NULL;
+}
+
+/* Read the representation header in FILE_CONTENT at OFFSET. Return its
+ * size in *HEADER_SIZE, set *IS_PLAIN if no deltification was used and
+ * return the deltification base representation in *REPRESENTATION. If
+ * there is none, set it to NULL. Use FS to it look up.
+ *
+ * Use POOL for allocations and SCRATCH_POOL for temporaries.
+ */
+static svn_error_t *
+read_rep_base(representation_t **representation,
+ apr_size_t *header_size,
+ svn_boolean_t *is_plain,
+ fs_fs_t *fs,
+ svn_stringbuf_t *file_content,
+ apr_size_t offset,
+ apr_pool_t *pool,
+ apr_pool_t *scratch_pool)
+{
+ char *str, *last_str;
+ int idx;
+ svn_revnum_t revision;
+ apr_uint64_t temp;
+
+ /* identify representation header (1 line) */
+ const char *buffer = file_content->data + offset;
+ const char *line_end = strchr(buffer, '\n');
+ *header_size = line_end - buffer + 1;
+
+ /* check for PLAIN rep */
+ if (strncmp(buffer, "PLAIN\n", *header_size) == 0)
+ {
+ *is_plain = TRUE;
+ *representation = NULL;
+ return SVN_NO_ERROR;
+ }
+
+ /* check for DELTA against empty rep */
+ *is_plain = FALSE;
+ if (strncmp(buffer, "DELTA\n", *header_size) == 0)
+ {
+ /* This is a delta against the empty stream. */
+ *representation = fs->null_base;
+ return SVN_NO_ERROR;
+ }
+
+ str = apr_pstrndup(scratch_pool, buffer, line_end - buffer);
+ last_str = str;
+
+ /* parse it. */
+ str = svn_cstring_tokenize(" ", &last_str);
+ str = svn_cstring_tokenize(" ", &last_str);
+ SVN_ERR(svn_revnum_parse(&revision, str, NULL));
+
+ str = svn_cstring_tokenize(" ", &last_str);
+ SVN_ERR(svn_cstring_strtoui64(&temp, str, 0, APR_SIZE_MAX, 10));
+
+ /* it should refer to a rep in an earlier revision. Look it up */
+ *representation = find_representation(&idx, fs, NULL, revision, (apr_size_t)temp);
+ return SVN_NO_ERROR;
+}
+
+/* Parse the representation reference (text: or props:) in VALUE, look
+ * it up in FS and return it in *REPRESENTATION. To be able to parse the
+ * base rep, we pass the FILE_CONTENT as well.
+ *
+ * If necessary, allocate the result in POOL; use SCRATCH_POOL for temp.
+ * allocations.
+ */
+static svn_error_t *
+parse_representation(representation_t **representation,
+ fs_fs_t *fs,
+ svn_stringbuf_t *file_content,
+ svn_string_t *value,
+ revision_info_t *revision_info,
+ apr_pool_t *pool,
+ apr_pool_t *scratch_pool)
+{
+ representation_t *result;
+ svn_revnum_t revision;
+
+ apr_uint64_t offset;
+ apr_uint64_t size;
+ apr_uint64_t expanded_size;
+ int idx;
+
+ /* read location (revision, offset) and size */
+ char *c = (char *)value->data;
+ SVN_ERR(svn_revnum_parse(&revision, svn_cstring_tokenize(" ", &c), NULL));
+ SVN_ERR(svn_cstring_strtoui64(&offset, svn_cstring_tokenize(" ", &c), 0, APR_SIZE_MAX, 10));
+ SVN_ERR(svn_cstring_strtoui64(&size, svn_cstring_tokenize(" ", &c), 0, APR_SIZE_MAX, 10));
+ SVN_ERR(svn_cstring_strtoui64(&expanded_size, svn_cstring_tokenize(" ", &c), 0, APR_SIZE_MAX, 10));
+
+ /* look it up */
+ result = find_representation(&idx, fs, &revision_info, revision, (apr_size_t)offset);
+ if (!result)
+ {
+ /* not parsed, yet (probably a rep in the same revision).
+ * Create a new rep object and determine its base rep as well.
+ */
+ apr_size_t header_size;
+ svn_boolean_t is_plain;
+
+ result = apr_pcalloc(pool, sizeof(*result));
+ result->revision = revision;
+ result->expanded_size = (apr_size_t)(expanded_size ? expanded_size : size);
+ result->offset = (apr_size_t)offset;
+ result->size = (apr_size_t)size;
+ SVN_ERR(read_rep_base(&result->delta_base, &header_size,
+ &is_plain, fs, file_content,
+ (apr_size_t)offset,
+ pool, scratch_pool));
+
+ result->header_size = header_size;
+ result->is_plain = is_plain;
+ svn_sort__array_insert(&result, revision_info->representations, idx);
+ }
+
+ *representation = result;
+
+ return SVN_NO_ERROR;
+}
+
+/* Get the unprocessed (i.e. still deltified) content of REPRESENTATION in
+ * FS and return it in *CONTENT. If no NULL, FILE_CONTENT must contain
+ * the contents of the revision that also contains the representation.
+ * Use POOL for allocations.
+ */
+static svn_error_t *
+get_rep_content(svn_stringbuf_t **content,
+ fs_fs_t *fs,
+ representation_t *representation,
+ svn_stringbuf_t *file_content,
+ apr_pool_t *pool)
+{
+ apr_off_t offset;
+ svn_revnum_t revision = representation->revision;
+ revision_info_t *revision_info = APR_ARRAY_IDX(fs->revisions,
+ revision - fs->start_revision,
+ revision_info_t*);
+
+ /* not in cache. Is the revision valid at all? */
+ if (revision - fs->start_revision > fs->revisions->nelts)
+ return svn_error_createf(SVN_ERR_FS_CORRUPT, NULL,
+ _("Unknown revision %ld"), revision);
+
+ if (file_content)
+ {
+ offset = representation->offset
+ + representation->header_size;
+ *content = svn_stringbuf_ncreate(file_content->data + offset,
+ representation->size, pool);
+ }
+ else
+ {
+ offset = revision_info->offset
+ + representation->offset
+ + representation->header_size;
+ SVN_ERR(get_content(content, NULL, fs, revision, offset,
+ representation->size, pool));
+ }
+
+ return SVN_NO_ERROR;
+}
+
+
+/* Read the delta window contents of all windows in REPRESENTATION in FS.
+ * If no NULL, FILE_CONTENT must contain the contents of the revision that
+ * also contains the representation.
+ * Return the data as svn_txdelta_window_t* instances in *WINDOWS.
+ * Use POOL for allocations.
+ */
+static svn_error_t *
+read_windows(apr_array_header_t **windows,
+ fs_fs_t *fs,
+ representation_t *representation,
+ svn_stringbuf_t *file_content,
+ apr_pool_t *pool)
+{
+ svn_stringbuf_t *content;
+ svn_stream_t *stream;
+ char version;
+ apr_size_t len = sizeof(version);
+
+ *windows = apr_array_make(pool, 0, sizeof(svn_txdelta_window_t *));
+
+ /* get the whole revision content */
+ SVN_ERR(get_rep_content(&content, fs, representation, file_content, pool));
+
+ /* create a read stream and position it directly after the rep header */
+ content->data += 3;
+ content->len -= 3;
+ stream = svn_stream_from_stringbuf(content, pool);
+ SVN_ERR(svn_stream_read(stream, &version, &len));
+
+ /* read the windows from that stream */
+ while (TRUE)
+ {
+ svn_txdelta_window_t *window;
+ svn_stream_mark_t *mark;
+ char dummy;
+
+ len = sizeof(dummy);
+ SVN_ERR(svn_stream_mark(stream, &mark, pool));
+ SVN_ERR(svn_stream_read(stream, &dummy, &len));
+ if (len == 0)
+ break;
+
+ SVN_ERR(svn_stream_seek(stream, mark));
+ SVN_ERR(svn_txdelta_read_svndiff_window(&window, stream, version, pool));
+ APR_ARRAY_PUSH(*windows, svn_txdelta_window_t *) = window;
+ }
+
+ return SVN_NO_ERROR;
+}
+
+/* Get the undeltified representation that is a result of combining all
+ * deltas from the current desired REPRESENTATION in FS with its base
+ * representation.  If not NULL, FILE_CONTENT must contain the contents of
+ * the revision that also contains the representation. Store the result
+ * in *CONTENT. Use POOL for allocations.
+ */
+static svn_error_t *
+get_combined_window(svn_stringbuf_t **content,
+ fs_fs_t *fs,
+ representation_t *representation,
+ svn_stringbuf_t *file_content,
+ apr_pool_t *pool)
+{
+ int i;
+ apr_array_header_t *windows;
+ svn_stringbuf_t *base_content, *result;
+ const char *source;
+ apr_pool_t *sub_pool;
+ apr_pool_t *iter_pool;
+
+ /* special case: no un-deltification necessary */
+ if (representation->is_plain)
+ {
+ SVN_ERR(get_rep_content(content, fs, representation, file_content,
+ pool));
+ SVN_ERR(set_cached_window(fs, representation, *content, pool));
+ return SVN_NO_ERROR;
+ }
+
+ /* special case: data already in cache */
+ SVN_ERR(get_cached_window(content, fs, representation, pool));
+ if (*content)
+ return SVN_NO_ERROR;
+
+ /* read the delta windows for this representation */
+ sub_pool = svn_pool_create(pool);
+ iter_pool = svn_pool_create(pool);
+ SVN_ERR(read_windows(&windows, fs, representation, file_content, sub_pool));
+
+ /* fetch the / create a base content */
+ if (representation->delta_base && representation->delta_base->revision)
+ SVN_ERR(get_combined_window(&base_content, fs,
+ representation->delta_base, NULL, sub_pool));
+ else
+ base_content = svn_stringbuf_create_empty(sub_pool);
+
+ /* apply deltas */
+ result = svn_stringbuf_create_empty(pool);
+ source = base_content->data;
+
+ for (i = 0; i < windows->nelts; ++i)
+ {
+ svn_txdelta_window_t *window
+ = APR_ARRAY_IDX(windows, i, svn_txdelta_window_t *);
+ svn_stringbuf_t *buf
+ = svn_stringbuf_create_ensure(window->tview_len, iter_pool);
+
+ buf->len = window->tview_len;
+ svn_txdelta_apply_instructions(window, window->src_ops ? source : NULL,
+ buf->data, &buf->len);
+
+ svn_stringbuf_appendbytes(result, buf->data, buf->len);
+ source += window->sview_len;
+
+ svn_pool_clear(iter_pool);
+ }
+
+ /* cache result and return it */
+ SVN_ERR(set_cached_window(fs, representation, result, sub_pool));
+ *content = result;
+
+ svn_pool_destroy(iter_pool);
+ svn_pool_destroy(sub_pool);
+
+ return SVN_NO_ERROR;
+}
+
+/* forward declaration */
+static svn_error_t *
+read_noderev(fs_fs_t *fs,
+ svn_stringbuf_t *file_content,
+ apr_size_t offset,
+ revision_info_t *revision_info,
+ apr_pool_t *pool,
+ apr_pool_t *scratch_pool);
+
+/* Starting at the directory in REPRESENTATION in FILE_CONTENT, read all
+ * DAG nodes, directories and representations linked in that tree structure.
+ * Store them in FS and REVISION_INFO. Also, read them only once.
+ *
+ * Use POOL for persistent allocations and SCRATCH_POOL for temporaries.
+ */
+static svn_error_t *
+parse_dir(fs_fs_t *fs,
+ svn_stringbuf_t *file_content,
+ representation_t *representation,
+ revision_info_t *revision_info,
+ apr_pool_t *pool,
+ apr_pool_t *scratch_pool)
+{
+ svn_stringbuf_t *text;
+ apr_pool_t *iter_pool;
+ apr_pool_t *text_pool;
+ const char *current;
+ const char *revision_key;
+ apr_size_t key_len;
+
+ /* special case: empty dir rep */
+ if (representation == NULL)
+ return SVN_NO_ERROR;
+
+ /* get the directory as unparsed string */
+ iter_pool = svn_pool_create(scratch_pool);
+ text_pool = svn_pool_create(scratch_pool);
+
+ SVN_ERR(get_combined_window(&text, fs, representation, file_content,
+ text_pool));
+ current = text->data;
+
+ /* calculate some invariants */
+ revision_key = apr_psprintf(text_pool, "r%ld/", representation->revision);
+ key_len = strlen(revision_key);
+
+ /* Parse and process all directory entries. */
+ while (*current != 'E')
+ {
+ char *next;
+
+      /* skip "K ???\n<name>\nV ???\n" lines */
+ current = strchr(current, '\n');
+ if (current)
+ current = strchr(current+1, '\n');
+ if (current)
+ current = strchr(current+1, '\n');
+ next = current ? strchr(++current, '\n') : NULL;
+ if (next == NULL)
+ return svn_error_createf(SVN_ERR_FS_CORRUPT, NULL,
+ _("Corrupt directory representation in r%ld at offset %ld"),
+ representation->revision,
+ (long)representation->offset);
+
+ /* iff this entry refers to a node in the same revision as this dir,
+ * recurse into that node */
+ *next = 0;
+ current = strstr(current, revision_key);
+ if (current)
+ {
+ /* recurse */
+ apr_uint64_t offset;
+
+ SVN_ERR(svn_cstring_strtoui64(&offset, current + key_len, 0,
+ APR_SIZE_MAX, 10));
+ SVN_ERR(read_noderev(fs, file_content, (apr_size_t)offset,
+ revision_info, pool, iter_pool));
+
+ svn_pool_clear(iter_pool);
+ }
+ current = next+1;
+ }
+
+ svn_pool_destroy(iter_pool);
+ svn_pool_destroy(text_pool);
+ return SVN_NO_ERROR;
+}
+
+/* Starting at the noderev at OFFSET in FILE_CONTENT, read all DAG nodes,
+ * directories and representations linked in that tree structure. Store
+ * them in FS and REVISION_INFO. Also, read them only once. Return the
+ * result in *NODEREV.
+ *
+ * Use POOL for persistent allocations and SCRATCH_POOL for temporaries.
+ */
+static svn_error_t *
+read_noderev(fs_fs_t *fs,
+ svn_stringbuf_t *file_content,
+ apr_size_t offset,
+ revision_info_t *revision_info,
+ apr_pool_t *pool,
+ apr_pool_t *scratch_pool)
+{
+ svn_string_t *line;
+ representation_t *text = NULL;
+ representation_t *props = NULL;
+ apr_size_t start_offset = offset;
+ svn_boolean_t is_dir = FALSE;
+ const char *path = "???";
+
+ scratch_pool = svn_pool_create(scratch_pool);
+
+ /* parse the noderev line-by-line until we find an empty line */
+ while (1)
+ {
+ /* for this line, extract key and value. Ignore invalid values */
+ svn_string_t key;
+ svn_string_t value;
+ char *sep;
+ const char *start = file_content->data + offset;
+ const char *end = strchr(start, '\n');
+
+ line = svn_string_ncreate(start, end - start, scratch_pool);
+ offset += end - start + 1;
+
+ /* empty line -> end of noderev data */
+ if (line->len == 0)
+ break;
+
+ sep = strchr(line->data, ':');
+ if (sep == NULL)
+ continue;
+
+ key.data = line->data;
+ key.len = sep - key.data;
+ *sep = 0;
+
+ if (key.len + 2 > line->len)
+ continue;
+
+ value.data = sep + 2;
+ value.len = line->len - (key.len + 2);
+
+ /* translate (key, value) into noderev elements */
+ if (key_matches(&key, "type"))
+ is_dir = strcmp(value.data, "dir") == 0;
+ else if (key_matches(&key, "text"))
+ {
+ SVN_ERR(parse_representation(&text, fs, file_content,
+ &value, revision_info,
+ pool, scratch_pool));
+
+ /* if we are the first to use this rep, mark it as "text rep" */
+ if (++text->ref_count == 1)
+ text->kind = is_dir ? dir_rep : file_rep;
+ }
+ else if (key_matches(&key, "props"))
+ {
+ SVN_ERR(parse_representation(&props, fs, file_content,
+ &value, revision_info,
+ pool, scratch_pool));
+
+ /* if we are the first to use this rep, mark it as "prop rep" */
+ if (++props->ref_count == 1)
+ props->kind = is_dir ? dir_property_rep : file_property_rep;
+ }
+ else if (key_matches(&key, "cpath"))
+ path = value.data;
+ }
+
+ /* record largest changes */
+ if (text && text->ref_count == 1)
+ add_change(fs, (apr_int64_t)text->size, (apr_int64_t)text->expanded_size,
+ text->revision, path, text->kind);
+ if (props && props->ref_count == 1)
+ add_change(fs, (apr_int64_t)props->size, (apr_int64_t)props->expanded_size,
+ props->revision, path, props->kind);
+
+ /* if this is a directory and has not been processed, yet, read and
+ * process it recursively */
+ if (is_dir && text && text->ref_count == 1)
+ SVN_ERR(parse_dir(fs, file_content, text, revision_info,
+ pool, scratch_pool));
+
+ /* update stats */
+ if (is_dir)
+ {
+ revision_info->dir_noderev_size += offset - start_offset;
+ revision_info->dir_noderev_count++;
+ }
+ else
+ {
+ revision_info->file_noderev_size += offset - start_offset;
+ revision_info->file_noderev_count++;
+ }
+ svn_pool_destroy(scratch_pool);
+
+ return SVN_NO_ERROR;
+}
+
+/* Given the unparsed changes list in CHANGES with LEN chars, return the
+ * number of changed paths encoded in it.
+ */
+static apr_size_t
+get_change_count(const char *changes,
+ apr_size_t len)
+{
+ apr_size_t lines = 0;
+ const char *end = changes + len;
+
+ /* line count */
+ for (; changes < end; ++changes)
+ if (*changes == '\n')
+ ++lines;
+
+ /* two lines per change */
+ return lines / 2;
+}
+
+/* Simple utility to print a REVISION number and make it appear immediately.
+ */
+static void
+print_progress(svn_revnum_t revision)
+{
+ printf("%8ld", revision);
+ fflush(stdout);
+}
+
+/* Read the content of the pack file starting at revision BASE and store it
+ * in FS. Use POOL for allocations.
+ */
+static svn_error_t *
+read_pack_file(fs_fs_t *fs,
+ svn_revnum_t base,
+ apr_pool_t *pool)
+{
+ apr_array_header_t *manifest = NULL;
+ apr_pool_t *local_pool = svn_pool_create(pool);
+ apr_pool_t *iter_pool = svn_pool_create(local_pool);
+ int i;
+ apr_off_t file_size = 0;
+ apr_file_t *file;
+ const char *pack_folder = get_pack_folder(fs, base, local_pool);
+
+ /* parse the manifest file */
+ SVN_ERR(read_manifest(&manifest, fs, pack_folder, local_pool));
+ if (manifest->nelts != fs->max_files_per_dir)
+ return svn_error_create(SVN_ERR_FS_CORRUPT, NULL, NULL);
+
+ SVN_ERR(open_rev_or_pack_file(&file, fs, base, local_pool));
+ SVN_ERR(get_file_size(&file_size, file, local_pool));
+
+ /* process each revision in the pack file */
+ for (i = 0; i < manifest->nelts; ++i)
+ {
+ apr_size_t root_node_offset;
+ svn_stringbuf_t *rev_content;
+
+ /* create the revision info for the current rev */
+ revision_info_t *info = apr_pcalloc(pool, sizeof(*info));
+ info->representations = apr_array_make(iter_pool, 4, sizeof(representation_t*));
+
+ info->revision = base + i;
+ info->offset = APR_ARRAY_IDX(manifest, i, apr_size_t);
+ info->end = i+1 < manifest->nelts
+ ? APR_ARRAY_IDX(manifest, i+1 , apr_size_t)
+ : file_size;
+
+ SVN_ERR(get_content(&rev_content, file, fs, info->revision,
+ info->offset,
+ info->end - info->offset,
+ iter_pool));
+
+ SVN_ERR(read_revision_header(&info->changes,
+ &info->changes_len,
+ &root_node_offset,
+ rev_content,
+ iter_pool));
+
+ info->change_count
+ = get_change_count(rev_content->data + info->changes,
+ info->changes_len);
+ SVN_ERR(read_noderev(fs, rev_content,
+ root_node_offset, info, pool, iter_pool));
+
+ info->representations = apr_array_copy(pool, info->representations);
+ APR_ARRAY_PUSH(fs->revisions, revision_info_t*) = info;
+
+ /* destroy temps */
+ svn_pool_clear(iter_pool);
+ }
+
+ /* one more pack file processed */
+ print_progress(base);
+ svn_pool_destroy(local_pool);
+
+ return SVN_NO_ERROR;
+}
+
+/* Read the content of the file for REVISION and store its contents in FS.
+ * Use POOL for allocations.
+ */
+static svn_error_t *
+read_revision_file(fs_fs_t *fs,
+ svn_revnum_t revision,
+ apr_pool_t *pool)
+{
+ apr_size_t root_node_offset;
+ apr_pool_t *local_pool = svn_pool_create(pool);
+ svn_stringbuf_t *rev_content;
+ revision_info_t *info = apr_pcalloc(pool, sizeof(*info));
+ apr_off_t file_size = 0;
+ apr_file_t *file;
+
+ /* read the whole pack file into memory */
+ SVN_ERR(open_rev_or_pack_file(&file, fs, revision, local_pool));
+ SVN_ERR(get_file_size(&file_size, file, local_pool));
+
+ /* create the revision info for the current rev */
+ info->representations = apr_array_make(pool, 4, sizeof(representation_t*));
+
+ info->revision = revision;
+ info->offset = 0;
+ info->end = file_size;
+
+ SVN_ERR(get_content(&rev_content, file, fs, revision, 0, file_size,
+ local_pool));
+
+ SVN_ERR(read_revision_header(&info->changes,
+ &info->changes_len,
+ &root_node_offset,
+ rev_content,
+ local_pool));
+
+ /* put it into our containers */
+ APR_ARRAY_PUSH(fs->revisions, revision_info_t*) = info;
+
+ info->change_count
+ = get_change_count(rev_content->data + info->changes,
+ info->changes_len);
+
+ /* parse the revision content recursively. */
+ SVN_ERR(read_noderev(fs, rev_content,
+ root_node_offset, info,
+ pool, local_pool));
+
+ /* show progress every 1000 revs or so */
+ if (revision % fs->max_files_per_dir == 0)
+ print_progress(revision);
+
+ svn_pool_destroy(local_pool);
+
+ return SVN_NO_ERROR;
+}
+
+/* Read the repository at PATH beginning with revision START_REVISION and
+ * return the result in *FS. Allocate caches with MEMSIZE bytes total
+ * capacity. Use POOL for non-cache allocations.
+ */
+static svn_error_t *
+read_revisions(fs_fs_t **fs,
+ const char *path,
+ svn_revnum_t start_revision,
+ apr_size_t memsize,
+ apr_pool_t *pool)
+{
+ svn_revnum_t revision;
+ svn_cache_config_t cache_config = *svn_cache_config_get();
+
+ /* determine cache sizes */
+
+ if (memsize < 100)
+ memsize = 100;
+
+ cache_config.cache_size = memsize * 1024 * 1024;
+ svn_cache_config_set(&cache_config);
+
+ SVN_ERR(fs_open(fs, path, pool));
+
+ /* create data containers and caches */
+ (*fs)->start_revision = start_revision
+ - (start_revision % (*fs)->max_files_per_dir);
+ (*fs)->revisions = apr_array_make(pool,
+ (*fs)->max_revision + 1 - (*fs)->start_revision,
+ sizeof(revision_info_t *));
+ (*fs)->null_base = apr_pcalloc(pool, sizeof(*(*fs)->null_base));
+ initialize_largest_changes(*fs, 64, pool);
+ (*fs)->by_extension = apr_hash_make(pool);
+
+ SVN_ERR(svn_cache__create_membuffer_cache(&(*fs)->window_cache,
+ svn_cache__get_global_membuffer_cache(),
+ NULL, NULL,
+ sizeof(window_cache_key_t),
+ "", FALSE, pool));
+
+ /* read all packed revs */
+ for ( revision = start_revision
+ ; revision < (*fs)->min_unpacked_rev
+ ; revision += (*fs)->max_files_per_dir)
+ SVN_ERR(read_pack_file(*fs, revision, pool));
+
+ /* read non-packed revs */
+ for ( ; revision <= (*fs)->max_revision; ++revision)
+ SVN_ERR(read_revision_file(*fs, revision, pool));
+
+ return SVN_NO_ERROR;
+}
+
+/* Compression statistics we collect over a given set of representations.
+ */
+typedef struct rep_pack_stats_t
+{
+ /* number of representations */
+ apr_int64_t count;
+
+ /* total size after deltification (i.e. on disk size) */
+ apr_int64_t packed_size;
+
+ /* total size after de-deltification (i.e. plain text size) */
+ apr_int64_t expanded_size;
+
+ /* total on-disk header size */
+ apr_int64_t overhead_size;
+} rep_pack_stats_t;
+
+/* Statistics we collect over a given set of representations.
+ * We group them into shared and non-shared ("unique") reps.
+ */
+typedef struct representation_stats_t
+{
+ /* stats over all representations */
+ rep_pack_stats_t total;
+
+ /* stats over those representations with ref_count == 1 */
+ rep_pack_stats_t uniques;
+
+ /* stats over those representations with ref_count > 1 */
+ rep_pack_stats_t shared;
+
+ /* sum of all ref_counts */
+ apr_int64_t references;
+
+ /* sum of ref_count * expanded_size,
+ * i.e. total plaintext content if there was no rep sharing */
+ apr_int64_t expanded_size;
+} representation_stats_t;
+
+/* Basic statistics we collect over a given set of noderevs.
+ */
+typedef struct node_stats_t
+{
+ /* number of noderev structs */
+ apr_int64_t count;
+
+ /* their total size on disk (structs only) */
+ apr_int64_t size;
+} node_stats_t;
+
+/* Accumulate stats of REP in STATS.
+ */
+static void
+add_rep_pack_stats(rep_pack_stats_t *stats,
+ representation_t *rep)
+{
+ stats->count++;
+
+ stats->packed_size += rep->size;
+ stats->expanded_size += rep->expanded_size;
+ stats->overhead_size += rep->header_size + 7 /* ENDREP\n */;
+}
+
+/* Accumulate stats of REP in STATS.
+ */
+static void
+add_rep_stats(representation_stats_t *stats,
+ representation_t *rep)
+{
+ add_rep_pack_stats(&stats->total, rep);
+ if (rep->ref_count == 1)
+ add_rep_pack_stats(&stats->uniques, rep);
+ else
+ add_rep_pack_stats(&stats->shared, rep);
+
+ stats->references += rep->ref_count;
+ stats->expanded_size += rep->ref_count * rep->expanded_size;
+}
+
+/* Print statistics for the given group of representations to console.
+ * Use POOL for allocations.
+ */
+static void
+print_rep_stats(representation_stats_t *stats,
+ apr_pool_t *pool)
+{
+ printf(_("%20s bytes in %12s reps\n"
+ "%20s bytes in %12s shared reps\n"
+ "%20s bytes expanded size\n"
+ "%20s bytes expanded shared size\n"
+ "%20s bytes with rep-sharing off\n"
+ "%20s shared references\n"),
+ svn__i64toa_sep(stats->total.packed_size, ',', pool),
+ svn__i64toa_sep(stats->total.count, ',', pool),
+ svn__i64toa_sep(stats->shared.packed_size, ',', pool),
+ svn__i64toa_sep(stats->shared.count, ',', pool),
+ svn__i64toa_sep(stats->total.expanded_size, ',', pool),
+ svn__i64toa_sep(stats->shared.expanded_size, ',', pool),
+ svn__i64toa_sep(stats->expanded_size, ',', pool),
+ svn__i64toa_sep(stats->references - stats->total.count, ',', pool));
+}
+
+/* Print the (used) contents of CHANGES. Use POOL for allocations.
+ */
+static void
+print_largest_reps(largest_changes_t *changes,
+ apr_pool_t *pool)
+{
+ apr_size_t i;
+ for (i = 0; i < changes->count && changes->changes[i]->size; ++i)
+ printf(_("%12s r%-8ld %s\n"),
+ svn__i64toa_sep(changes->changes[i]->size, ',', pool),
+ changes->changes[i]->revision,
+ changes->changes[i]->path->data);
+}
+
+/* Print the non-zero section of HISTOGRAM to console.
+ * Use POOL for allocations.
+ */
+static void
+print_histogram(histogram_t *histogram,
+ apr_pool_t *pool)
+{
+ int first = 0;
+ int last = 63;
+ int i;
+
+ /* identify non-zero range */
+ while (last > 0 && histogram->lines[last].count == 0)
+ --last;
+
+ while (first <= last && histogram->lines[first].count == 0)
+ ++first;
+
+ /* display histogram lines */
+ for (i = last; i >= first; --i)
+ printf(_(" [2^%2d, 2^%2d) %15s (%2d%%) bytes in %12s (%2d%%) items\n"),
+ i-1, i,
+ svn__i64toa_sep(histogram->lines[i].sum, ',', pool),
+ (int)(histogram->lines[i].sum * 100 / histogram->total.sum),
+ svn__i64toa_sep(histogram->lines[i].count, ',', pool),
+ (int)(histogram->lines[i].count * 100 / histogram->total.count));
+}
+
+/* COMPARISON_FUNC for svn_sort__hash.
+ * Sort extension_info_t values by total count in descending order.
+ */
+static int
+compare_count(const svn_sort__item_t *a,
+ const svn_sort__item_t *b)
+{
+ const extension_info_t *lhs = a->value;
+ const extension_info_t *rhs = b->value;
+ apr_int64_t diff = lhs->node_histogram.total.count
+ - rhs->node_histogram.total.count;
+
+ return diff > 0 ? -1 : (diff < 0 ? 1 : 0);
+}
+
+/* COMPARISON_FUNC for svn_sort__hash.
+ * Sort extension_info_t values by total uncompressed size in descending order.
+ */
+static int
+compare_node_size(const svn_sort__item_t *a,
+ const svn_sort__item_t *b)
+{
+ const extension_info_t *lhs = a->value;
+ const extension_info_t *rhs = b->value;
+ apr_int64_t diff = lhs->node_histogram.total.sum
+ - rhs->node_histogram.total.sum;
+
+ return diff > 0 ? -1 : (diff < 0 ? 1 : 0);
+}
+
+/* COMPARISON_FUNC for svn_sort__hash.
+ * Sort extension_info_t values by total representation size in descending order.
+ */
+static int
+compare_rep_size(const svn_sort__item_t *a,
+ const svn_sort__item_t *b)
+{
+ const extension_info_t *lhs = a->value;
+ const extension_info_t *rhs = b->value;
+ apr_int64_t diff = lhs->rep_histogram.total.sum
+ - rhs->rep_histogram.total.sum;
+
+ return diff > 0 ? -1 : (diff < 0 ? 1 : 0);
+}
+
+/* Return an array of extension_info_t* for the (up to) 16 most prominent
+ * extensions in FS according to the sort criterion COMPARISON_FUNC.
+ * Allocate results in POOL.
+ */
+static apr_array_header_t *
+get_by_extensions(fs_fs_t *fs,
+ int (*comparison_func)(const svn_sort__item_t *,
+ const svn_sort__item_t *),
+ apr_pool_t *pool)
+{
+ /* sort all data by extension */
+ apr_array_header_t *sorted
+ = svn_sort__hash(fs->by_extension, comparison_func, pool);
+
+ /* select the top (first) 16 entries */
+ int count = MIN(sorted->nelts, 16);
+ apr_array_header_t *result
+ = apr_array_make(pool, count, sizeof(extension_info_t*));
+ int i;
+
+ for (i = 0; i < count; ++i)
+ APR_ARRAY_PUSH(result, extension_info_t*)
+ = APR_ARRAY_IDX(sorted, i, svn_sort__item_t).value;
+
+ return result;
+}
+
+/* Add all extension_info_t* entries of TO_ADD not already in TARGET to
+ * TARGET.
+ */
+static void
+merge_by_extension(apr_array_header_t *target,
+ apr_array_header_t *to_add)
+{
+ int i, k, count;
+
+ count = target->nelts;
+ for (i = 0; i < to_add->nelts; ++i)
+ {
+ extension_info_t *info = APR_ARRAY_IDX(to_add, i, extension_info_t *);
+ for (k = 0; k < count; ++k)
+ if (info == APR_ARRAY_IDX(target, k, extension_info_t *))
+ break;
+
+ if (k == count)
+ APR_ARRAY_PUSH(target, extension_info_t*) = info;
+ }
+}
+
+/* Print the (up to) 16 extensions in FS with the most changes.
+ * Use POOL for allocations.
+ */
+static void
+print_extensions_by_changes(fs_fs_t *fs,
+ apr_pool_t *pool)
+{
+ apr_array_header_t *data = get_by_extensions(fs, compare_count, pool);
+ apr_int64_t sum = 0;
+ int i;
+
+ for (i = 0; i < data->nelts; ++i)
+ {
+ extension_info_t *info = APR_ARRAY_IDX(data, i, extension_info_t *);
+ sum += info->node_histogram.total.count;
+ printf(_(" %9s %12s (%2d%%) changes\n"),
+ info->extension,
+ svn__i64toa_sep(info->node_histogram.total.count, ',', pool),
+ (int)(info->node_histogram.total.count * 100 /
+ fs->file_histogram.total.count));
+ }
+
+ printf(_(" %9s %12s (%2d%%) changes\n"),
+ "(others)",
+ svn__i64toa_sep(fs->file_histogram.total.count - sum, ',', pool),
+ (int)((fs->file_histogram.total.count - sum) * 100 /
+ fs->file_histogram.total.count));
+}
+
+/* Print the (up to) 16 extensions in FS with the largest total size of
+ * changed file content. Use POOL for allocations.
+ */
+static void
+print_extensions_by_nodes(fs_fs_t *fs,
+ apr_pool_t *pool)
+{
+ apr_array_header_t *data = get_by_extensions(fs, compare_node_size, pool);
+ apr_int64_t sum = 0;
+ int i;
+
+ for (i = 0; i < data->nelts; ++i)
+ {
+ extension_info_t *info = APR_ARRAY_IDX(data, i, extension_info_t *);
+ sum += info->node_histogram.total.sum;
+ printf(_(" %9s %20s (%2d%%) bytes\n"),
+ info->extension,
+ svn__i64toa_sep(info->node_histogram.total.sum, ',', pool),
+ (int)(info->node_histogram.total.sum * 100 /
+ fs->file_histogram.total.sum));
+ }
+
+ printf(_(" %9s %20s (%2d%%) bytes\n"),
+ "(others)",
+ svn__i64toa_sep(fs->file_histogram.total.sum - sum, ',', pool),
+ (int)((fs->file_histogram.total.sum - sum) * 100 /
+ fs->file_histogram.total.sum));
+}
+
+/* Print the (up to) 16 extensions in FS with the largest total size of
+ * representations.  Use POOL for allocations.
+ */
+static void
+print_extensions_by_reps(fs_fs_t *fs,
+ apr_pool_t *pool)
+{
+ apr_array_header_t *data = get_by_extensions(fs, compare_rep_size, pool);
+ apr_int64_t sum = 0;
+ int i;
+
+ for (i = 0; i < data->nelts; ++i)
+ {
+ extension_info_t *info = APR_ARRAY_IDX(data, i, extension_info_t *);
+ sum += info->rep_histogram.total.sum;
+ printf(_(" %9s %20s (%2d%%) bytes\n"),
+ info->extension,
+ svn__i64toa_sep(info->rep_histogram.total.sum, ',', pool),
+ (int)(info->rep_histogram.total.sum * 100 /
+ fs->rep_size_histogram.total.sum));
+ }
+
+ printf(_(" %9s %20s (%2d%%) bytes\n"),
+ "(others)",
+ svn__i64toa_sep(fs->rep_size_histogram.total.sum - sum, ',', pool),
+ (int)((fs->rep_size_histogram.total.sum - sum) * 100 /
+ fs->rep_size_histogram.total.sum));
+}
+
+/* Print per-extension histograms for the most frequent extensions in FS.
+ * Use POOL for allocations. */
+static void
+print_histograms_by_extension(fs_fs_t *fs,
+ apr_pool_t *pool)
+{
+ apr_array_header_t *data = get_by_extensions(fs, compare_count, pool);
+ int i;
+
+ merge_by_extension(data, get_by_extensions(fs, compare_node_size, pool));
+ merge_by_extension(data, get_by_extensions(fs, compare_rep_size, pool));
+
+ for (i = 0; i < data->nelts; ++i)
+ {
+ extension_info_t *info = APR_ARRAY_IDX(data, i, extension_info_t *);
+ printf("\nHistogram of '%s' file sizes:\n", info->extension);
+ print_histogram(&info->node_histogram, pool);
+ printf("\nHistogram of '%s' file representation sizes:\n",
+ info->extension);
+ print_histogram(&info->rep_histogram, pool);
+ }
+}
+
+/* Post-process stats for FS and print them to the console.
+ * Use POOL for allocations.
+ */
+static void
+print_stats(fs_fs_t *fs,
+ apr_pool_t *pool)
+{
+ int i, k;
+
+ /* initialize stats to collect */
+ representation_stats_t file_rep_stats = { { 0 } };
+ representation_stats_t dir_rep_stats = { { 0 } };
+ representation_stats_t file_prop_rep_stats = { { 0 } };
+ representation_stats_t dir_prop_rep_stats = { { 0 } };
+ representation_stats_t total_rep_stats = { { 0 } };
+
+ node_stats_t dir_node_stats = { 0 };
+ node_stats_t file_node_stats = { 0 };
+ node_stats_t total_node_stats = { 0 };
+
+ apr_int64_t total_size = 0;
+ apr_int64_t change_count = 0;
+ apr_int64_t change_len = 0;
+
+ /* aggregate info from all revisions */
+ for (i = 0; i < fs->revisions->nelts; ++i)
+ {
+ revision_info_t *revision = APR_ARRAY_IDX(fs->revisions, i,
+ revision_info_t *);
+
+ /* data gathered on a revision level */
+ change_count += revision->change_count;
+ change_len += revision->changes_len;
+ total_size += revision->end - revision->offset;
+
+ dir_node_stats.count += revision->dir_noderev_count;
+ dir_node_stats.size += revision->dir_noderev_size;
+ file_node_stats.count += revision->file_noderev_count;
+ file_node_stats.size += revision->file_noderev_size;
+ total_node_stats.count += revision->dir_noderev_count
+ + revision->file_noderev_count;
+ total_node_stats.size += revision->dir_noderev_size
+ + revision->file_noderev_size;
+
+ /* process representations */
+ for (k = 0; k < revision->representations->nelts; ++k)
+ {
+ representation_t *rep = APR_ARRAY_IDX(revision->representations,
+ k, representation_t *);
+
+ /* accumulate in the right bucket */
+ switch(rep->kind)
+ {
+ case file_rep:
+ add_rep_stats(&file_rep_stats, rep);
+ break;
+ case dir_rep:
+ add_rep_stats(&dir_rep_stats, rep);
+ break;
+ case file_property_rep:
+ add_rep_stats(&file_prop_rep_stats, rep);
+ break;
+ case dir_property_rep:
+ add_rep_stats(&dir_prop_rep_stats, rep);
+ break;
+ default:
+ break;
+ }
+
+ add_rep_stats(&total_rep_stats, rep);
+ }
+ }
+
+ /* print results */
+ printf("\nGlobal statistics:\n");
+ printf(_("%20s bytes in %12s revisions\n"
+ "%20s bytes in %12s changes\n"
+ "%20s bytes in %12s node revision records\n"
+ "%20s bytes in %12s representations\n"
+ "%20s bytes expanded representation size\n"
+ "%20s bytes with rep-sharing off\n"),
+ svn__i64toa_sep(total_size, ',', pool),
+ svn__i64toa_sep(fs->revisions->nelts, ',', pool),
+ svn__i64toa_sep(change_len, ',', pool),
+ svn__i64toa_sep(change_count, ',', pool),
+ svn__i64toa_sep(total_node_stats.size, ',', pool),
+ svn__i64toa_sep(total_node_stats.count, ',', pool),
+ svn__i64toa_sep(total_rep_stats.total.packed_size, ',', pool),
+ svn__i64toa_sep(total_rep_stats.total.count, ',', pool),
+ svn__i64toa_sep(total_rep_stats.total.expanded_size, ',', pool),
+ svn__i64toa_sep(total_rep_stats.expanded_size, ',', pool));
+
+ printf("\nNoderev statistics:\n");
+ printf(_("%20s bytes in %12s nodes total\n"
+ "%20s bytes in %12s directory noderevs\n"
+ "%20s bytes in %12s file noderevs\n"),
+ svn__i64toa_sep(total_node_stats.size, ',', pool),
+ svn__i64toa_sep(total_node_stats.count, ',', pool),
+ svn__i64toa_sep(dir_node_stats.size, ',', pool),
+ svn__i64toa_sep(dir_node_stats.count, ',', pool),
+ svn__i64toa_sep(file_node_stats.size, ',', pool),
+ svn__i64toa_sep(file_node_stats.count, ',', pool));
+
+ printf("\nRepresentation statistics:\n");
+ printf(_("%20s bytes in %12s representations total\n"
+ "%20s bytes in %12s directory representations\n"
+ "%20s bytes in %12s file representations\n"
+ "%20s bytes in %12s directory property representations\n"
+ "%20s bytes in %12s file property representations\n"
+ "%20s bytes in header & footer overhead\n"),
+ svn__i64toa_sep(total_rep_stats.total.packed_size, ',', pool),
+ svn__i64toa_sep(total_rep_stats.total.count, ',', pool),
+ svn__i64toa_sep(dir_rep_stats.total.packed_size, ',', pool),
+ svn__i64toa_sep(dir_rep_stats.total.count, ',', pool),
+ svn__i64toa_sep(file_rep_stats.total.packed_size, ',', pool),
+ svn__i64toa_sep(file_rep_stats.total.count, ',', pool),
+ svn__i64toa_sep(dir_prop_rep_stats.total.packed_size, ',', pool),
+ svn__i64toa_sep(dir_prop_rep_stats.total.count, ',', pool),
+ svn__i64toa_sep(file_prop_rep_stats.total.packed_size, ',', pool),
+ svn__i64toa_sep(file_prop_rep_stats.total.count, ',', pool),
+ svn__i64toa_sep(total_rep_stats.total.overhead_size, ',', pool));
+
+ printf("\nDirectory representation statistics:\n");
+ print_rep_stats(&dir_rep_stats, pool);
+ printf("\nFile representation statistics:\n");
+ print_rep_stats(&file_rep_stats, pool);
+ printf("\nDirectory property representation statistics:\n");
+ print_rep_stats(&dir_prop_rep_stats, pool);
+ printf("\nFile property representation statistics:\n");
+ print_rep_stats(&file_prop_rep_stats, pool);
+
+ printf("\nLargest representations:\n");
+ print_largest_reps(fs->largest_changes, pool);
+ printf("\nExtensions by number of changes:\n");
+ print_extensions_by_changes(fs, pool);
+ printf("\nExtensions by size of changed files:\n");
+ print_extensions_by_nodes(fs, pool);
+ printf("\nExtensions by size of representations:\n");
+ print_extensions_by_reps(fs, pool);
+
+ printf("\nHistogram of expanded node sizes:\n");
+ print_histogram(&fs->node_size_histogram, pool);
+ printf("\nHistogram of representation sizes:\n");
+ print_histogram(&fs->rep_size_histogram, pool);
+ printf("\nHistogram of file sizes:\n");
+ print_histogram(&fs->file_histogram, pool);
+ printf("\nHistogram of file representation sizes:\n");
+ print_histogram(&fs->file_rep_histogram, pool);
+ printf("\nHistogram of file property sizes:\n");
+ print_histogram(&fs->file_prop_histogram, pool);
+ printf("\nHistogram of file property representation sizes:\n");
+ print_histogram(&fs->file_prop_rep_histogram, pool);
+ printf("\nHistogram of directory sizes:\n");
+ print_histogram(&fs->dir_histogram, pool);
+ printf("\nHistogram of directory representation sizes:\n");
+ print_histogram(&fs->dir_rep_histogram, pool);
+ printf("\nHistogram of directory property sizes:\n");
+ print_histogram(&fs->dir_prop_histogram, pool);
+ printf("\nHistogram of directory property representation sizes:\n");
+ print_histogram(&fs->dir_prop_rep_histogram, pool);
+
+ print_histograms_by_extension(fs, pool);
+}
+
+/* Write tool usage info text to OSTREAM using PROGNAME as a prefix and
+ * POOL for allocations.
+ */
+static void
+print_usage(svn_stream_t *ostream, const char *progname,
+ apr_pool_t *pool)
+{
+ svn_error_clear(svn_stream_printf(ostream, pool,
+ "\n"
+ "Usage: %s <repo> [cachesize]\n"
+ "\n"
+ "Read the repository at local path <repo> starting at revision 0,\n"
+ "count statistical information and write that data to stdout.\n"
+ "Use up to [cachesize] MB of memory for caching. This does not include\n"
+ "temporary representation of the repository structure, i.e. the actual\n"
+ "memory may be considerably higher. If not given, defaults to 100 MB.\n",
+ progname));
+}
+
+/* linear control flow */
+int main(int argc, const char *argv[])
+{
+ apr_pool_t *pool;
+ svn_stream_t *ostream;
+ svn_error_t *svn_err;
+ const char *repo_path = NULL;
+ svn_revnum_t start_revision = 0;
+ apr_size_t memsize = 100;
+ apr_uint64_t temp = 0;
+ fs_fs_t *fs;
+
+ apr_initialize();
+ atexit(apr_terminate);
+
+ pool = apr_allocator_owner_get(svn_pool_create_allocator(FALSE));
+
+ svn_err = svn_stream_for_stdout(&ostream, pool);
+ if (svn_err)
+ {
+ svn_handle_error2(svn_err, stdout, FALSE, ERROR_TAG);
+ return 2;
+ }
+
+ if (argc < 2 || argc > 3)
+ {
+ print_usage(ostream, argv[0], pool);
+ return 2;
+ }
+
+ if (argc == 3)
+ {
+ svn_err = svn_cstring_strtoui64(&temp, argv[2], 0, APR_SIZE_MAX, 10);
+ if (svn_err)
+ {
+ print_usage(ostream, argv[0], pool);
+ svn_error_clear(svn_err);
+ return 2;
+ }
+
+ memsize = (apr_size_t)temp;
+ }
+
+ repo_path = svn_dirent_canonicalize(argv[1], pool);
+ start_revision = 0;
+
+ printf("Reading revisions\n");
+ svn_err = read_revisions(&fs, repo_path, start_revision, memsize, pool);
+ printf("\n");
+
+ if (svn_err)
+ {
+ svn_handle_error2(svn_err, stdout, FALSE, ERROR_TAG);
+ return 2;
+ }
+
+ print_stats(fs, pool);
+
+ return 0;
+}
diff --git a/tools/server-side/mod_dontdothat/mod_dontdothat.c b/tools/server-side/mod_dontdothat/mod_dontdothat.c
index c7c6613..b4801ed 100644
--- a/tools/server-side/mod_dontdothat/mod_dontdothat.c
+++ b/tools/server-side/mod_dontdothat/mod_dontdothat.c
@@ -30,12 +30,15 @@
#include <util_filter.h>
#include <ap_config.h>
#include <apr_strings.h>
+#include <apr_uri.h>
#include <expat.h>
#include "mod_dav_svn.h"
#include "svn_string.h"
#include "svn_config.h"
+#include "svn_path.h"
+#include "private/svn_fspath.h"
module AP_MODULE_DECLARE_DATA dontdothat_module;
@@ -161,26 +164,71 @@ matches(const char *wc, const char *p)
}
}
+/* duplicate of dav_svn__log_err() from mod_dav_svn/util.c */
+static void
+log_dav_err(request_rec *r,
+ dav_error *err,
+ int level)
+{
+ dav_error *errscan;
+
+ /* Log the errors */
+ /* ### should have a directive to log the first or all */
+ for (errscan = err; errscan != NULL; errscan = errscan->prev) {
+ apr_status_t status;
+
+ if (errscan->desc == NULL)
+ continue;
+
+#if AP_MODULE_MAGIC_AT_LEAST(20091119,0)
+ status = errscan->aprerr;
+#else
+ status = errscan->save_errno;
+#endif
+
+ ap_log_rerror(APLOG_MARK, level, status, r,
+ "%s [%d, #%d]",
+ errscan->desc, errscan->status, errscan->error_id);
+ }
+}
+
static svn_boolean_t
is_this_legal(dontdothat_filter_ctx *ctx, const char *uri)
{
const char *relative_path;
const char *cleaned_uri;
const char *repos_name;
+ const char *uri_path;
int trailing_slash;
dav_error *derr;
- /* Ok, so we need to skip past the scheme, host, etc. */
- uri = ap_strstr_c(uri, "://");
- if (uri)
- uri = ap_strchr_c(uri + 3, '/');
+ /* uri can be an absolute uri or just a path, we only want the path to match
+ * against */
+ if (uri && svn_path_is_url(uri))
+ {
+ apr_uri_t parsed_uri;
+ apr_status_t rv = apr_uri_parse(ctx->r->pool, uri, &parsed_uri);
+ if (APR_SUCCESS != rv)
+ {
+ /* Error parsing the URI, log and reject request. */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, ctx->r,
+ "mod_dontdothat: blocked request after failing "
+ "to parse uri: '%s'", uri);
+ return FALSE;
+ }
+ uri_path = parsed_uri.path;
+ }
+ else
+ {
+ uri_path = uri;
+ }
- if (uri)
+ if (uri_path)
{
const char *repos_path;
derr = dav_svn_split_uri(ctx->r,
- uri,
+ uri_path,
ctx->cfg->base_path,
&cleaned_uri,
&trailing_slash,
@@ -194,7 +242,7 @@ is_this_legal(dontdothat_filter_ctx *ctx, const char *uri)
if (! repos_path)
repos_path = "";
- repos_path = apr_psprintf(ctx->r->pool, "/%s", repos_path);
+ repos_path = svn_fspath__canonicalize(repos_path, ctx->r->pool);
/* First check the special cases that are always legal... */
for (idx = 0; idx < ctx->allow_recursive_ops->nelts; ++idx)
@@ -228,6 +276,19 @@ is_this_legal(dontdothat_filter_ctx *ctx, const char *uri)
}
}
}
+ else
+ {
+ log_dav_err(ctx->r, derr, APLOG_ERR);
+ return FALSE;
+ }
+
+ }
+ else
+ {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r,
+ "mod_dontdothat: empty uri passed to is_this_legal(), "
+ "module bug?");
+ return FALSE;
}
return TRUE;
@@ -271,7 +332,7 @@ dontdothat_filter(ap_filter_t *f,
return rv;
}
- if (! XML_Parse(ctx->xmlp, str, len, last))
+ if (! XML_Parse(ctx->xmlp, str, (int)len, last))
{
/* let_it_go so we clean up our parser, no_soup_for_you so that we
* bail out before bothering to parse this stuff a second time. */
@@ -584,7 +645,8 @@ dontdothat_insert_filters(request_rec *r)
/* XXX is there a way to error out from this point? Would be nice... */
- err = svn_config_read(&config, cfg->config_file, TRUE, r->pool);
+ err = svn_config_read3(&config, cfg->config_file, TRUE,
+ FALSE, TRUE, r->pool);
if (err)
{
char buff[256];
diff --git a/tools/server-side/svn-backup-dumps.py b/tools/server-side/svn-backup-dumps.py
index bb6b235..1fd2363 100755
--- a/tools/server-side/svn-backup-dumps.py
+++ b/tools/server-side/svn-backup-dumps.py
@@ -686,7 +686,7 @@ if __name__ == "__main__":
print("Everything OK.")
sys.exit(0)
else:
- print("An error occured!")
+ print("An error occurred!")
sys.exit(1)
# vim:et:ts=4:sw=4
diff --git a/tools/server-side/svn-rep-sharing-stats.c b/tools/server-side/svn-rep-sharing-stats.c
index e57ff91..f610409 100644
--- a/tools/server-side/svn-rep-sharing-stats.c
+++ b/tools/server-side/svn-rep-sharing-stats.c
@@ -34,6 +34,8 @@
/* for svn_fs_fs__id_* (used in assertions only) */
#include "../../subversion/libsvn_fs_fs/id.h"
+#include "private/svn_cmdline_private.h"
+
#include "svn_private_config.h"
@@ -42,8 +44,8 @@
static svn_error_t *
version(apr_pool_t *pool)
{
- return svn_opt_print_help3(NULL, "svn-rep-sharing-stats", TRUE, FALSE, NULL,
- NULL, NULL, NULL, NULL, NULL, pool);
+ return svn_opt_print_help4(NULL, "svn-rep-sharing-stats", TRUE, FALSE, FALSE,
+ NULL, NULL, NULL, NULL, NULL, NULL, pool);
}
static void
@@ -91,8 +93,8 @@ check_lib_versions(void)
{ "svn_fs", svn_fs_version },
{ NULL, NULL }
};
-
SVN_VERSION_DEFINE(my_version);
+
return svn_error_trace(svn_ver_check_list(&my_version, checklist));
}
@@ -421,7 +423,6 @@ int
main(int argc, const char *argv[])
{
const char *repos_path;
- apr_allocator_t *allocator;
apr_pool_t *pool;
svn_boolean_t prop = FALSE, data = FALSE;
svn_boolean_t quiet = FALSE;
@@ -446,13 +447,7 @@ main(int argc, const char *argv[])
/* Create our top-level pool. Use a separate mutexless allocator,
* given this application is single threaded.
*/
- if (apr_allocator_create(&allocator))
- return EXIT_FAILURE;
-
- apr_allocator_max_free_set(allocator, SVN_ALLOCATOR_RECOMMENDED_MAX_FREE);
-
- pool = svn_pool_create_ex(NULL, allocator);
- apr_allocator_owner_set(allocator, pool);
+ pool = apr_allocator_owner_get(svn_pool_create_allocator(FALSE));
/* Check library versions */
err = check_lib_versions();
diff --git a/tools/server-side/svnauthz-validate.c b/tools/server-side/svnauthz-validate.c
deleted file mode 100644
index df7d541..0000000
--- a/tools/server-side/svnauthz-validate.c
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * svnauthz-validate.c : Load and validate an authz file.
- *
- * ====================================================================
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- * ====================================================================
- *
- *
- * svnauthz-validate.c : load and validate an authz file, returns
- * value == 0 if syntax of authz file is correct
- * value == 1 if syntax of authz file is invalid or file not found
- * value == 2 in case of general error
- *
- */
-
-#include "svn_pools.h"
-#include "svn_repos.h"
-#include "svn_cmdline.h"
-
-int
-main(int argc, const char **argv)
-{
- apr_pool_t *pool;
- svn_error_t *err;
- svn_authz_t *authz;
- const char *authz_file;
-
- if (argc <= 1)
- {
- printf("Usage: %s PATH \n\n", argv[0]);
- printf("Loads the authz file at PATH and validates its syntax. \n"
- "Returns:\n"
- " 0 when syntax is OK.\n"
- " 1 when syntax is invalid.\n"
- " 2 operational error\n");
- return 2;
- }
-
- authz_file = argv[1];
-
- /* Initialize the app. Send all error messages to 'stderr'. */
- if (svn_cmdline_init(argv[0], stderr) != EXIT_SUCCESS)
- return 2;
-
- pool = svn_pool_create(NULL);
-
- /* Read the access file and validate it. */
- err = svn_repos_authz_read(&authz, authz_file, TRUE, pool);
-
- svn_pool_destroy(pool);
-
- if (err)
- {
- svn_handle_error2(err, stderr, FALSE, "svnauthz-validate: ");
- return 1;
- }
- else
- {
- return 0;
- }
-}
diff --git a/tools/server-side/svnauthz.c b/tools/server-side/svnauthz.c
new file mode 100644
index 0000000..ab8c62d
--- /dev/null
+++ b/tools/server-side/svnauthz.c
@@ -0,0 +1,771 @@
+/*
+ * svnauthz.c : Tool for working with authz files.
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include "svn_cmdline.h"
+#include "svn_dirent_uri.h"
+#include "svn_opt.h"
+#include "svn_pools.h"
+#include "svn_repos.h"
+#include "svn_utf.h"
+#include "svn_path.h"
+
+#include "private/svn_fspath.h"
+#include "private/svn_cmdline_private.h"
+
+
+/*** Option Processing. ***/
+
+enum svnauthz__cmdline_options_t
+{
+ svnauthz__version = SVN_OPT_FIRST_LONGOPT_ID,
+ svnauthz__username,
+ svnauthz__path,
+ svnauthz__repos,
+ svnauthz__is,
+ svnauthz__groups_file
+};
+
+/* Option codes and descriptions.
+ *
+ * The entire list must be terminated with an entry of nulls.
+ */
+static const apr_getopt_option_t options_table[] =
+{
+ {"help", 'h', 0, ("show help on a subcommand")},
+ {NULL, '?', 0, ("show help on a subcommand")},
+ {"version", svnauthz__version, 0, ("show program version information")},
+ {"username", svnauthz__username, 1, ("username to check access of")},
+ {"path", svnauthz__path, 1, ("path within repository to check access of")},
+ {"repository", svnauthz__repos, 1, ("repository authz name")},
+ {"transaction", 't', 1, ("transaction id")},
+ {"is", svnauthz__is, 1,
+ ("instead of outputting, test if the access is\n"
+ " "
+ "exactly ARG\n"
+ " "
+ "ARG can be one of the following values:\n"
+ " "
+ " rw write access (which also implies read)\n"
+ " "
+ " r read-only access\n"
+ " "
+ " no no access")
+ },
+ {"groups-file", svnauthz__groups_file, 1,
+ ("use the groups from file ARG")},
+ {"recursive", 'R', 0,
+ ("determine recursive access to PATH")},
+ {0, 0, 0, 0}
+};
+
+struct svnauthz_opt_state
+{
+ svn_boolean_t help;
+ svn_boolean_t version;
+ svn_boolean_t recursive;
+ const char *authz_file;
+ const char *groups_file;
+ const char *username;
+ const char *fspath;
+ const char *repos_name;
+ const char *txn;
+ const char *repos_path;
+ const char *is;
+};
+
+/* The name of this binary in 1.7 and earlier. */
+#define SVNAUTHZ_COMPAT_NAME "svnauthz-validate"
+
+/* Libtool command prefix */
+#define SVNAUTHZ_LT_PREFIX "lt-"
+
+
+/*** Subcommands. */
+
+static svn_opt_subcommand_t
+ subcommand_help,
+ subcommand_validate,
+ subcommand_accessof;
+
+/* Array of available subcommands.
+ * The entire list must be terminated with an entry of nulls.
+ */
+static const svn_opt_subcommand_desc2_t cmd_table[] =
+{
+ {"help", subcommand_help, {"?", "h"},
+ ("usage: svnauthz help [SUBCOMMAND...]\n\n"
+ "Describe the usage of this program or its subcommands.\n"),
+ {0} },
+ {"validate", subcommand_validate, {0} /* no aliases */,
+ ("Checks the syntax of an authz file.\n"
+ "usage: 1. svnauthz validate TARGET\n"
+ " 2. svnauthz validate --transaction TXN REPOS_PATH FILE_PATH\n\n"
+ " 1. Loads and validates the syntax of the authz file at TARGET.\n"
+ " TARGET can be a path to a file or an absolute file:// URL to an authz\n"
+ " file in a repository, but cannot be a repository relative URL (^/).\n\n"
+ " 2. Loads and validates the syntax of the authz file at FILE_PATH in the\n"
+ " transaction TXN in the repository at REPOS_PATH.\n\n"
+ "Returns:\n"
+ " 0 when syntax is OK.\n"
+ " 1 when syntax is invalid.\n"
+ " 2 operational error\n"
+ ),
+ {'t'} },
+ {"accessof", subcommand_accessof, {0} /* no aliases */,
+ ("Print or test the permissions set by an authz file.\n"
+ "usage: 1. svnauthz accessof TARGET\n"
+ " 2. svnauthz accessof -t TXN REPOS_PATH FILE_PATH\n"
+ "\n"
+ " 1. Prints the access of USER to PATH based on authorization file at TARGET.\n"
+ " TARGET can be a path to a file or an absolute file:// URL to an authz\n"
+ " file in a repository, but cannot be a repository relative URL (^/).\n"
+ "\n"
+ " 2. Prints the access of USER to PATH based on authz file at FILE_PATH in the\n"
+ " transaction TXN in the repository at REPOS_PATH.\n"
+ "\n"
+ " USER is the argument to the --username option; if that option is not\n"
+ " provided, then access of an anonymous user will be printed or tested.\n"
+ "\n"
+ " PATH is the argument to the --path option; if that option is not provided,\n"
+ " the maximal access to any path in the repository will be considered.\n"
+ "\n"
+ "Outputs one of the following:\n"
+ " rw write access (which also implies read)\n"
+ " r read access\n"
+ " no no access\n"
+ "\n"
+ "Returns:\n"
+ " 0 when syntax is OK and '--is' argument (if any) matches.\n"
+ " 1 when syntax is invalid.\n"
+ " 2 operational error\n"
+ " 3 when '--is' argument doesn't match\n"
+ ),
+ {'t', svnauthz__username, svnauthz__path, svnauthz__repos, svnauthz__is,
+ svnauthz__groups_file, 'R'} },
+ { NULL, NULL, {0}, NULL, {0} }
+};
+
+static svn_error_t *
+subcommand_help(apr_getopt_t *os, void *baton, apr_pool_t *pool)
+{
+ struct svnauthz_opt_state *opt_state = baton;
+ const char *header =
+ ("general usage: svnauthz SUBCOMMAND TARGET [ARGS & OPTIONS ...]\n"
+ " " SVNAUTHZ_COMPAT_NAME " TARGET\n\n"
+ "If the command name starts with '" SVNAUTHZ_COMPAT_NAME "', runs in\n"
+ "pre-1.8 compatibility mode: run the 'validate' subcommand on TARGET.\n\n"
+ "Type 'svnauthz help <subcommand>' for help on a specific subcommand.\n"
+ "Type 'svnauthz --version' to see the program version.\n\n"
+ "Available subcommands:\n");
+
+ const char *fs_desc_start
+ = ("The following repository back-end (FS) modules are available:\n\n");
+
+ svn_stringbuf_t *version_footer;
+
+ version_footer = svn_stringbuf_create(fs_desc_start, pool);
+ SVN_ERR(svn_fs_print_modules(version_footer, pool));
+
+ SVN_ERR(svn_opt_print_help4(os, "svnauthz",
+ opt_state ? opt_state->version : FALSE,
+ FALSE, /* quiet */
+ FALSE, /* verbose */
+ version_footer->data,
+ header, cmd_table, options_table, NULL, NULL,
+ pool));
+
+ return SVN_NO_ERROR;
+}
+
+/* Loads the fs FILENAME contents into *CONTENTS ensuring that the
+ corresponding node is a file. Using POOL for allocations. */
+static svn_error_t *
+read_file_contents(svn_stream_t **contents, const char *filename,
+ svn_fs_root_t *root, apr_pool_t *pool)
+{
+ svn_node_kind_t node_kind;
+
+ /* Make sure the path is a file */
+ SVN_ERR(svn_fs_check_path(&node_kind, root, filename, pool));
+ if (node_kind != svn_node_file)
+ return svn_error_createf(SVN_ERR_FS_NOT_FILE, NULL,
+ "Path '%s' is not a file", filename);
+
+ SVN_ERR(svn_fs_file_contents(contents, root, filename, pool));
+
+ return SVN_NO_ERROR;
+}
+
+/* Loads the authz config into *AUTHZ from the file at AUTHZ_FILE
+ in repository at REPOS_PATH from the transaction TXN_NAME. If GROUPS_FILE
+ is set, the resulting *AUTHZ will be constructed from AUTHZ_FILE with
+ global groups taken from GROUPS_FILE. Using POOL for allocations. */
+static svn_error_t *
+get_authz_from_txn(svn_authz_t **authz, const char *repos_path,
+ const char *authz_file, const char *groups_file,
+ const char *txn_name, apr_pool_t *pool)
+{
+ svn_repos_t *repos;
+ svn_fs_t *fs;
+ svn_fs_txn_t *txn;
+ svn_fs_root_t *root;
+ svn_stream_t *authz_contents;
+ svn_stream_t *groups_contents;
+ svn_error_t *err;
+
+ /* Open up the repository and find the transaction root */
+ SVN_ERR(svn_repos_open2(&repos, repos_path, NULL, pool));
+ fs = svn_repos_fs(repos);
+ SVN_ERR(svn_fs_open_txn(&txn, fs, txn_name, pool));
+ SVN_ERR(svn_fs_txn_root(&root, txn, pool));
+
+ /* Get the authz file contents. */
+ SVN_ERR(read_file_contents(&authz_contents, authz_file, root, pool));
+
+ /* Get the groups file contents if needed. */
+ if (groups_file)
+ SVN_ERR(read_file_contents(&groups_contents, groups_file, root, pool));
+ else
+ groups_contents = NULL;
+
+ err = svn_repos_authz_parse(authz, authz_contents, groups_contents, pool);
+
+ /* Add the filename to the error stack since the parser doesn't have it. */
+ if (err != SVN_NO_ERROR)
+ return svn_error_createf(err->apr_err, err,
+ "Error parsing authz file: '%s':", authz_file);
+
+ return SVN_NO_ERROR;
+}
+
+/* Loads the authz config into *AUTHZ from OPT_STATE->AUTHZ_FILE. If
+ OPT_STATE->GROUPS_FILE is set, loads the global groups from it.
+ If OPT_STATE->TXN is set then OPT_STATE->AUTHZ_FILE and
+ OPT_STATE->GROUPS_FILE are treated as fspaths in repository at
+ OPT_STATE->REPOS_PATH. */
+static svn_error_t *
+get_authz(svn_authz_t **authz, struct svnauthz_opt_state *opt_state,
+ apr_pool_t *pool)
+{
+ /* Read the access file and validate it. */
+ if (opt_state->txn)
+ return get_authz_from_txn(authz, opt_state->repos_path,
+ opt_state->authz_file,
+ opt_state->groups_file,
+ opt_state->txn, pool);
+
+ /* Else */
+ return svn_repos_authz_read2(authz, opt_state->authz_file,
+ opt_state->groups_file,
+ TRUE, pool);
+}
+
+static svn_error_t *
+subcommand_validate(apr_getopt_t *os, void *baton, apr_pool_t *pool)
+{
+ struct svnauthz_opt_state *opt_state = baton;
+ svn_authz_t *authz;
+
+ /* Not much to do here since just loading the authz file also validates. */
+ return get_authz(&authz, opt_state, pool);
+}
+
+static svn_error_t *
+subcommand_accessof(apr_getopt_t *os, void *baton, apr_pool_t *pool)
+{
+ svn_authz_t *authz;
+ svn_boolean_t read_access = FALSE, write_access = FALSE;
+ svn_boolean_t check_r = FALSE, check_rw = FALSE, check_no = FALSE;
+ svn_error_t *err;
+ struct svnauthz_opt_state *opt_state = baton;
+ const char *user = opt_state->username;
+ const char *path = opt_state->fspath;
+ const char *repos = opt_state->repos_name;
+ const char *is = opt_state->is;
+ svn_repos_authz_access_t request;
+
+ if (opt_state->recursive && !path)
+ return svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ ("--recursive not valid without --path"));
+
+ /* Handle is argument parsing/allowed values */
+ if (is) {
+ if (0 == strcmp(is, "rw"))
+ check_rw = TRUE;
+ else if (0 == strcmp(is, "r"))
+ check_r = TRUE;
+ else if (0 == strcmp(is, "no"))
+ check_no = TRUE;
+ else
+ return svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ ("'%s' is not a valid argument for --is"), is);
+ }
+
+ SVN_ERR(get_authz(&authz, opt_state, pool));
+
+
+ request = svn_authz_write;
+ if (opt_state->recursive)
+ request |= svn_authz_recursive;
+ err = svn_repos_authz_check_access(authz, repos, path, user,
+ request, &write_access,
+ pool);
+
+ if (!write_access && !err)
+ {
+ request = svn_authz_read;
+ if (opt_state->recursive)
+ request |= svn_authz_recursive;
+ err = svn_repos_authz_check_access(authz, repos, path, user,
+ request, &read_access,
+ pool);
+ }
+
+ if (!err)
+ {
+ const char *access_str = write_access ? "rw" : read_access ? "r" : "no";
+
+ if (is)
+ {
+ /* Check that --is argument matches.
+ * The errors returned here are not strictly correct, but
+ * none of the other code paths will generate them and they
+ * roughly mean what we're saying here. */
+ if (check_rw && !write_access)
+ err = svn_error_createf(SVN_ERR_AUTHZ_UNWRITABLE, NULL,
+ ("%s is '%s', not writable"),
+ path ? path : ("Repository"), access_str);
+ else if (check_r && !read_access)
+ err = svn_error_createf(SVN_ERR_AUTHZ_UNREADABLE, NULL,
+ ("%s is '%s', not read only"),
+ path ? path : ("Repository"), access_str);
+ else if (check_no && (read_access || write_access))
+ err = svn_error_createf(SVN_ERR_AUTHZ_PARTIALLY_READABLE,
+ NULL, ("%s is '%s', not no access"),
+ path ? path : ("Repository"), access_str);
+ }
+ else
+ {
+ err = svn_cmdline_printf(pool, "%s\n", access_str);
+ }
+ }
+
+ return err;
+}
+
+
+
+/*** Main. ***/
+
+/* A redefinition of EXIT_FAILURE since our contract demands that we
+ exit with 2 for internal failures. */
+#undef EXIT_FAILURE
+#define EXIT_FAILURE 2
+
+/* Similar to svn_cmdline_handle_exit_error but with an exit_code argument
+ so we can comply with our contract and exit with 2 for internal failures.
+ Also is missing the pool argument since we don't need it given
+ main/sub_main. */
+static int
+handle_exit_error(svn_error_t *err, const char *prefix, int exit_code)
+{
+ /* Issue #3014:
+ * Don't print anything on broken pipes. The pipe was likely
+ * closed by the process at the other end. We expect that
+ * process to perform error reporting as necessary.
+ *
+ * ### This assumes that there is only one error in a chain for
+ * ### SVN_ERR_IO_PIPE_WRITE_ERROR. See svn_cmdline_fputs(). */
+ if (err->apr_err != SVN_ERR_IO_PIPE_WRITE_ERROR)
+ svn_handle_error2(err, stderr, FALSE, prefix);
+ svn_error_clear(err);
+ return exit_code;
+}
+
+/* Report and clear the error ERR, and return EXIT_FAILURE. */
+#define EXIT_ERROR(err, exit_code) \
+ handle_exit_error(err, "svnauthz: ", exit_code)
+
+/* A redefinition of the public SVN_INT_ERR macro, that suppresses the
+ * error message if it is SVN_ERR_IO_PIPE_WRITE_ERROR, and with the
+ * program name 'svnauthz' instead of 'svn'. */
+#undef SVN_INT_ERR
+#define SVN_INT_ERR(expr) \
+ do { \
+ svn_error_t *svn_err__temp = (expr); \
+ if (svn_err__temp) \
+ return EXIT_ERROR(svn_err__temp, EXIT_FAILURE); \
+ } while (0)
+
+
+/* Return TRUE if the UI of 'svnauthz-validate' (svn 1.7 and earlier)
+ should be emulated, given argv[0]. */
+static svn_boolean_t
+use_compat_mode(const char *cmd, apr_pool_t *pool)
+{
+ cmd = svn_dirent_internal_style(cmd, pool);
+ cmd = svn_dirent_basename(cmd, NULL);
+
+ /* Skip over the Libtool command prefix if it exists on the command. */
+ if (0 == strncmp(SVNAUTHZ_LT_PREFIX, cmd, sizeof(SVNAUTHZ_LT_PREFIX)-1))
+ cmd += sizeof(SVNAUTHZ_LT_PREFIX) - 1;
+
+ /* Deliberately look only for the start of the name to deal with
+ the executable extension on some platforms. */
+ return 0 == strncmp(SVNAUTHZ_COMPAT_NAME, cmd,
+ sizeof(SVNAUTHZ_COMPAT_NAME)-1);
+}
+
+/* Canonicalize ACCESS_FILE into *CANONICALIZED_ACCESS_FILE based on the type
+ of argument. Error out on unsupported path types. If WITHIN_TXN is set,
+ ACCESS_FILE has to be a fspath in the repo. Use POOL for allocations. */
+static svn_error_t *
+canonicalize_access_file(const char **canonicalized_access_file,
+ const char *access_file,
+ svn_boolean_t within_txn,
+ apr_pool_t *pool)
+{
+ if (svn_path_is_repos_relative_url(access_file))
+ {
+ /* Can't accept repos relative urls since we don't have the path to
+ * the repository. */
+ return svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ ("'%s' is a repository relative URL when it "
+ "should be a local path or file:// URL"),
+ access_file);
+ }
+ else if (svn_path_is_url(access_file))
+ {
+ if (within_txn)
+ {
+ /* Don't allow urls with transaction argument. */
+ return svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ ("'%s' is a URL when it should be a "
+ "repository-relative path"),
+ access_file);
+ }
+
+ *canonicalized_access_file = svn_uri_canonicalize(access_file, pool);
+ }
+ else if (within_txn)
+ {
+ /* Transaction flag means this has to be a fspath to the access file
+ * in the repo. */
+ *canonicalized_access_file =
+ svn_fspath__canonicalize(access_file, pool);
+ }
+ else
+ {
+ /* If it isn't a URL and there's no transaction flag then it's a
+ * dirent to the access file on local disk. */
+ *canonicalized_access_file =
+ svn_dirent_internal_style(access_file, pool);
+ }
+
+ return SVN_NO_ERROR;
+}
+
+static int
+sub_main(int argc, const char *argv[], apr_pool_t *pool)
+{
+ svn_error_t *err;
+
+ const svn_opt_subcommand_desc2_t *subcommand = NULL;
+ struct svnauthz_opt_state opt_state = { 0 };
+ apr_getopt_t *os;
+ apr_array_header_t *received_opts;
+ int i;
+
+ /* Initialize the FS library. */
+ SVN_INT_ERR(svn_fs_initialize(pool));
+
+ received_opts = apr_array_make(pool, SVN_OPT_MAX_OPTIONS, sizeof(int));
+
+ /* Initialize opt_state */
+ opt_state.username = opt_state.fspath = opt_state.repos_name = NULL;
+ opt_state.txn = opt_state.repos_path = opt_state.groups_file = NULL;
+
+ /* Parse options. */
+ SVN_INT_ERR(svn_cmdline__getopt_init(&os, argc, argv, pool));
+ os->interleave = 1;
+
+ if (!use_compat_mode(argv[0], pool))
+ {
+ while (1)
+ {
+ int opt;
+ const char *arg;
+ apr_status_t status = apr_getopt_long(os, options_table, &opt, &arg);
+
+ if (APR_STATUS_IS_EOF(status))
+ break;
+ if (status != APR_SUCCESS)
+ {
+ SVN_INT_ERR(subcommand_help(NULL, NULL, pool));
+ return EXIT_FAILURE;
+ }
+
+ /* Stash the option code in an array before parsing it. */
+ APR_ARRAY_PUSH(received_opts, int) = opt;
+
+ switch (opt)
+ {
+ case 'h':
+ case '?':
+ opt_state.help = TRUE;
+ break;
+ case 't':
+ SVN_INT_ERR(svn_utf_cstring_to_utf8(&opt_state.txn, arg, pool));
+ break;
+ case 'R':
+ opt_state.recursive = TRUE;
+ break;
+ case svnauthz__version:
+ opt_state.version = TRUE;
+ break;
+ case svnauthz__username:
+ SVN_INT_ERR(svn_utf_cstring_to_utf8(&opt_state.username, arg, pool));
+ break;
+ case svnauthz__path:
+ SVN_INT_ERR(svn_utf_cstring_to_utf8(&opt_state.fspath, arg, pool));
+ opt_state.fspath = svn_fspath__canonicalize(opt_state.fspath,
+ pool);
+ break;
+ case svnauthz__repos:
+ SVN_INT_ERR(svn_utf_cstring_to_utf8(&opt_state.repos_name, arg, pool));
+ break;
+ case svnauthz__is:
+ SVN_INT_ERR(svn_utf_cstring_to_utf8(&opt_state.is, arg, pool));
+ break;
+ case svnauthz__groups_file:
+ SVN_INT_ERR(
+ svn_utf_cstring_to_utf8(&opt_state.groups_file,
+ arg, pool));
+ break;
+ default:
+ {
+ SVN_INT_ERR(subcommand_help(NULL, NULL, pool));
+ return EXIT_FAILURE;
+ }
+ }
+ }
+ }
+ else
+ {
+ /* Pre 1.8 compatibility mode. */
+ if (argc == 1) /* No path argument */
+ subcommand = svn_opt_get_canonical_subcommand2(cmd_table, "help");
+ else
+ subcommand = svn_opt_get_canonical_subcommand2(cmd_table, "validate");
+ }
+
+ /* If the user asked for help, then the rest of the arguments are
+ the names of subcommands to get help on (if any), or else they're
+ just typos/mistakes. Whatever the case, the subcommand to
+ actually run is subcommand_help(). */
+ if (opt_state.help)
+ subcommand = svn_opt_get_canonical_subcommand2(cmd_table, "help");
+
+ if (subcommand == NULL)
+ {
+ if (os->ind >= os->argc)
+ {
+ if (opt_state.version)
+ {
+ /* Use the "help" subcommand to handle the "--version" option. */
+ static const svn_opt_subcommand_desc2_t pseudo_cmd =
+ { "--version", subcommand_help, {0}, "",
+ {svnauthz__version /* must accept its own option */ } };
+
+ subcommand = &pseudo_cmd;
+ }
+ else
+ {
+ svn_error_clear(svn_cmdline_fprintf(stderr, pool,
+ ("subcommand argument required\n")));
+ SVN_INT_ERR(subcommand_help(NULL, NULL, pool));
+ return EXIT_FAILURE;
+ }
+ }
+ else
+ {
+ const char *first_arg = os->argv[os->ind++];
+ subcommand = svn_opt_get_canonical_subcommand2(cmd_table, first_arg);
+ if (subcommand == NULL)
+ {
+ const char *first_arg_utf8;
+
+ os->ind++;
+ SVN_INT_ERR(svn_utf_cstring_to_utf8(&first_arg_utf8,
+ first_arg, pool));
+ svn_error_clear(
+ svn_cmdline_fprintf(stderr, pool,
+ ("Unknown subcommand: '%s'\n"),
+ first_arg_utf8));
+ SVN_INT_ERR(subcommand_help(NULL, NULL, pool));
+ return EXIT_FAILURE;
+ }
+ }
+ }
+
+ /* Every subcommand except `help' requires one or two non-option arguments.
+ Parse them and store them in opt_state.*/
+ if (subcommand->cmd_func != subcommand_help)
+ {
+ /* Consume a non-option argument (repos_path) if --transaction */
+ if (opt_state.txn)
+ {
+ if (os->ind +2 != argc)
+ {
+ err = svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ ("Repository and authz file arguments "
+ "required"));
+ return EXIT_ERROR(err, EXIT_FAILURE);
+ }
+
+ SVN_INT_ERR(svn_utf_cstring_to_utf8(&opt_state.repos_path, os->argv[os->ind],
+ pool));
+ os->ind++;
+
+ opt_state.repos_path = svn_dirent_internal_style(opt_state.repos_path, pool);
+ }
+
+ /* Exactly 1 non-option argument */
+ if (os->ind + 1 != argc)
+ {
+ err = svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ ("Authz file argument required"));
+ return EXIT_ERROR(err, EXIT_FAILURE);
+ }
+
+ /* Grab AUTHZ_FILE from argv. */
+ SVN_INT_ERR(svn_utf_cstring_to_utf8(&opt_state.authz_file, os->argv[os->ind],
+ pool));
+
+ /* Canonicalize opt_state.authz_file appropriately. */
+ SVN_INT_ERR(canonicalize_access_file(&opt_state.authz_file,
+ opt_state.authz_file,
+ opt_state.txn != NULL, pool));
+
+ /* Same for opt_state.groups_file if it is present. */
+ if (opt_state.groups_file)
+ {
+ SVN_INT_ERR(canonicalize_access_file(&opt_state.groups_file,
+ opt_state.groups_file,
+ opt_state.txn != NULL, pool));
+ }
+ }
+
+ /* Check that the subcommand wasn't passed any inappropriate options. */
+ for (i = 0; i < received_opts->nelts; i++)
+ {
+ int opt_id = APR_ARRAY_IDX(received_opts, i, int);
+
+ /* All commands implicitly accept --help, so just skip over this
+ when we see it. Note that we don't want to include this option
+ in their "accepted options" list because it would be awfully
+ redundant to display it in every commands' help text. */
+ if (opt_id == 'h' || opt_id == '?')
+ continue;
+
+ if (! svn_opt_subcommand_takes_option3(subcommand, opt_id, NULL))
+ {
+ const char *optstr;
+ const apr_getopt_option_t *badopt =
+ svn_opt_get_option_from_code2(opt_id, options_table, subcommand,
+ pool);
+ svn_opt_format_option(&optstr, badopt, FALSE, pool);
+ if (subcommand->name[0] == '-')
+ SVN_INT_ERR(subcommand_help(NULL, NULL, pool));
+ else
+ svn_error_clear(svn_cmdline_fprintf(stderr, pool,
+ ("Subcommand '%s' doesn't accept option '%s'\n"
+ "Type 'svnauthz help %s' for usage.\n"),
+ subcommand->name, optstr, subcommand->name));
+ return EXIT_FAILURE;
+ }
+ }
+
+ /* Run the subcommand. */
+ err = (*subcommand->cmd_func)(os, &opt_state, pool);
+
+ if (err)
+ {
+ if (err->apr_err == SVN_ERR_CL_INSUFFICIENT_ARGS
+ || err->apr_err == SVN_ERR_CL_ARG_PARSING_ERROR)
+ {
+ /* For argument-related problems, suggest using the 'help'
+ subcommand. */
+ err = svn_error_quick_wrap(err,
+ ("Try 'svnauthz help' for more info"));
+ }
+ else if (err->apr_err == SVN_ERR_AUTHZ_INVALID_CONFIG
+ || err->apr_err == SVN_ERR_MALFORMED_FILE)
+ {
+ /* Follow our contract that says we exit with 1 if the file does not
+ validate. */
+ return EXIT_ERROR(err, 1);
+ }
+ else if (err->apr_err == SVN_ERR_AUTHZ_UNREADABLE
+ || err->apr_err == SVN_ERR_AUTHZ_UNWRITABLE
+ || err->apr_err == SVN_ERR_AUTHZ_PARTIALLY_READABLE)
+ {
+ /* Follow our contract that says we exit with 3 if --is does not
+ * match. */
+ return EXIT_ERROR(err, 3);
+ }
+
+
+ return EXIT_ERROR(err, EXIT_FAILURE);
+ }
+ else
+ {
+ /* Ensure that everything is written to stdout, so the user will
+ see any print errors. */
+ err = svn_cmdline_fflush(stdout);
+ if (err)
+ {
+ return EXIT_ERROR(err, EXIT_FAILURE);
+ }
+ return EXIT_SUCCESS;
+ }
+
+}
+
+int
+main(int argc, const char *argv[])
+{
+ apr_pool_t *pool;
+ int exit_code;
+
+ /* Initialize the app. Send all error messages to 'stderr'. */
+ if (svn_cmdline_init(argv[0], stderr) != EXIT_SUCCESS)
+ return EXIT_FAILURE;
+
+ pool = svn_pool_create(NULL);
+
+ exit_code = sub_main(argc, argv, pool);
+
+ svn_pool_destroy(pool);
+ return exit_code;
+}
diff --git a/tools/server-side/svnpredumpfilter.py b/tools/server-side/svnpredumpfilter.py
new file mode 100755
index 0000000..5a74755
--- /dev/null
+++ b/tools/server-side/svnpredumpfilter.py
@@ -0,0 +1,319 @@
+#!/usr/bin/env python
+
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+"""\
+Usage: 1. {PROGRAM} [OPTIONS] include INCLUDE-PATH ...
+ 2. {PROGRAM} [OPTIONS] exclude EXCLUDE-PATH ...
+
+Read a Subversion revision log output stream from stdin, analyzing its
+revision log history to see what paths would need to be additionally
+provided as part of the list of included/excluded paths if trying to
+use Subversion's 'svndumpfilter' program to include/exclude paths from
+a full dump of a repository's history.
+
+The revision log stream should be the result of 'svn log -v' or 'svn
+log -vq' when run against the root of the repository whose history
+will be filtered by a user with universal read access to the
+repository's data. Do not use the --use-merge-history (-g) or
+--stop-on-copy when generating this revision log stream.
+Use the default ordering of revisions (that is, '-r HEAD:0').
+
+Return errorcode 0 if there are no additional dependencies found, 1 if
+there were; any other errorcode indicates a fatal error.
+
+Options:
+
+ --help (-h) Show this usage message and exit.
+
+ --targets FILE Read INCLUDE-PATHs and EXCLUDE-PATHs from FILE,
+ one path per line.
+
+ --verbose (-v) Provide more information. May be used multiple
+ times for additional levels of information (-vv).
+"""
+import sys
+import os
+import getopt
+import string
+
+verbosity = 0
+
+class LogStreamError(Exception): pass
+class EOFError(Exception): pass
+
+EXIT_SUCCESS = 0
+EXIT_MOREDEPS = 1
+EXIT_FAILURE = 2
+
+def sanitize_path(path):
+ return '/'.join(filter(None, path.split('/')))
+
+def subsumes(path, maybe_child):
+ if path == maybe_child:
+ return True
+ if maybe_child.find(path + '/') == 0:
+ return True
+ return False
+
+def compare_paths(path1, path2):
+ # Are the paths exactly the same?
+ if path1 == path2:
+ return 0
+
+ # Skip past common prefix
+ path1_len = len(path1);
+ path2_len = len(path2);
+ min_len = min(path1_len, path2_len)
+ i = 0
+ while (i < min_len) and (path1[i] == path2[i]):
+ i = i + 1
+
+ # Children of paths are greater than their parents, but less than
+ # greater siblings of their parents
+ char1 = '\0'
+ char2 = '\0'
+ if (i < path1_len):
+ char1 = path1[i]
+ if (i < path2_len):
+ char2 = path2[i]
+
+ if (char1 == '/') and (i == path2_len):
+ return 1
+ if (char2 == '/') and (i == path1_len):
+ return -1
+ if (i < path1_len) and (char1 == '/'):
+ return -1
+ if (i < path2_len) and (char2 == '/'):
+ return 1
+
+ # Common prefix was skipped above, next character is compared to
+ # determine order
+ return cmp(char1, char2)
+
+def log(msg, min_verbosity):
+ if verbosity >= min_verbosity:
+ if min_verbosity == 1:
+ sys.stderr.write("[* ] ")
+ elif min_verbosity == 2:
+ sys.stderr.write("[**] ")
+ sys.stderr.write(msg + "\n")
+
+class DependencyTracker:
+ def __init__(self, include_paths):
+ self.include_paths = include_paths[:]
+ self.dependent_paths = []
+
+ def path_included(self, path):
+ for include_path in self.include_paths + self.dependent_paths:
+ if subsumes(include_path, path):
+ return True
+ return False
+
+ def handle_changes(self, path_copies):
+ for path, copyfrom_path in path_copies.items():
+ if self.path_included(path) and copyfrom_path:
+ if not self.path_included(copyfrom_path):
+ self.dependent_paths.append(copyfrom_path)
+
+def readline(stream):
+ line = stream.readline()
+ if not line:
+ raise EOFError("Unexpected end of stream")
+ line = line.rstrip('\n\r')
+ log(line, 2)
+ return line
+
+def svn_log_stream_get_dependencies(stream, included_paths):
+ import re
+
+ dt = DependencyTracker(included_paths)
+
+ header_re = re.compile(r'^r([0-9]+) \|.*$')
+ action_re = re.compile(r'^ [ADMR] /(.*)$')
+ copy_action_re = re.compile(r'^ [AR] /(.*) \(from /(.*):[0-9]+\)$')
+ line_buf = None
+ last_revision = 0
+ eof = False
+ path_copies = {}
+ found_changed_path = False
+
+ while not eof:
+ try:
+ line = line_buf is not None and line_buf or readline(stream)
+ except EOFError:
+ break
+
+ # We should be sitting at a log divider line.
+ if line != '-' * 72:
+ raise LogStreamError("Expected log divider line; not found.")
+
+ # Next up is a log header line.
+ try:
+ line = readline(stream)
+ except EOFError:
+ break
+ match = header_re.search(line)
+ if not match:
+ raise LogStreamError("Expected log header line; not found.")
+ pieces = map(string.strip, line.split('|'))
+ revision = int(pieces[0][1:])
+ if last_revision and revision >= last_revision:
+ raise LogStreamError("Revisions are misordered. Make sure log stream "
+ "is from 'svn log' with the youngest revisions "
+ "before the oldest ones (the default ordering).")
+ log("Parsing revision %d" % (revision), 1)
+ last_revision = revision
+ idx = pieces[-1].find(' line')
+ if idx != -1:
+ log_lines = int(pieces[-1][:idx])
+ else:
+ log_lines = 0
+
+ # Now see if there are any changed paths. If so, parse and process them.
+ line = readline(stream)
+ if line == 'Changed paths:':
+ while 1:
+ try:
+ line = readline(stream)
+ except EOFError:
+ eof = True
+ break
+ match = action_re.search(line)
+ if match:
+ found_changed_path = True
+ match = copy_action_re.search(line)
+ if match:
+ path_copies[sanitize_path(match.group(1))] = \
+ sanitize_path(match.group(2))
+ else:
+ break
+ dt.handle_changes(path_copies)
+
+ # Finally, skip any log message lines. (If there are none,
+ # remember the last line we read, because it probably has
+ # something important in it.)
+ if log_lines:
+ for i in range(log_lines):
+ readline(stream)
+ line_buf = None
+ else:
+ line_buf = line
+
+ if not found_changed_path:
+ raise LogStreamError("No changed paths found; did you remember to run "
+ "'svn log' with the --verbose (-v) option when "
+ "generating the input to this script?")
+
+ return dt
+
+def analyze_logs(included_paths):
+ print "Initial include paths:"
+ for path in included_paths:
+ print " + /%s" % (path)
+
+ dt = svn_log_stream_get_dependencies(sys.stdin, included_paths)
+
+ if dt.dependent_paths:
+ found_new_deps = True
+ print "Dependent include paths found:"
+ for path in dt.dependent_paths:
+ print " + /%s" % (path)
+ print "You need to also include them (or one of their parents)."
+ else:
+ found_new_deps = False
+ print "No new dependencies found!"
+ parents = {}
+ for path in dt.include_paths:
+ while 1:
+ parent = os.path.dirname(path)
+ if not parent:
+ break
+ parents[parent] = 1
+ path = parent
+ parents = parents.keys()
+ if parents:
+ print "You might still need to manually create parent directories " \
+ "for the included paths before loading a filtered dump:"
+ parents.sort(compare_paths)
+ for parent in parents:
+ print " /%s" % (parent)
+
+ return found_new_deps and EXIT_MOREDEPS or EXIT_SUCCESS
+
+def usage_and_exit(errmsg=None):
+ program = os.path.basename(sys.argv[0])
+ stream = errmsg and sys.stderr or sys.stdout
+ stream.write(__doc__.replace("{PROGRAM}", program))
+ if errmsg:
+ stream.write("\nERROR: %s\n" % (errmsg))
+ sys.exit(errmsg and EXIT_FAILURE or EXIT_SUCCESS)
+
+def main():
+ config_dir = None
+ targets_file = None
+
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "hv",
+ ["help", "verbose", "targets="])
+ except getopt.GetoptError, e:
+ usage_and_exit(str(e))
+
+ for option, value in opts:
+ if option in ['-h', '--help']:
+ usage_and_exit()
+ elif option in ['-v', '--verbose']:
+ global verbosity
+ verbosity = verbosity + 1
+ elif option in ['--targets']:
+ targets_file = value
+
+ if len(args) == 0:
+ usage_and_exit("Not enough arguments")
+
+ if targets_file is None:
+ targets = args[1:]
+ else:
+ targets = map(lambda x: x.rstrip('\n\r'),
+ open(targets_file, 'r').readlines())
+ if not targets:
+ usage_and_exit("No target paths specified")
+
+ try:
+ if args[0] == 'include':
+ sys.exit(analyze_logs(map(sanitize_path, targets)))
+ elif args[0] == 'exclude':
+ usage_and_exit("Feature not implemented")
+ else:
+ usage_and_exit("Valid subcommands are 'include' and 'exclude'")
+ except SystemExit:
+ raise
+ except (LogStreamError, EOFError), e:
+ log("ERROR: " + str(e), 0)
+ sys.exit(EXIT_FAILURE)
+ except:
+ import traceback
+ exc_type, exc, exc_tb = sys.exc_info()
+ tb = traceback.format_exception(exc_type, exc, exc_tb)
+ sys.stderr.write(''.join(tb))
+ sys.exit(EXIT_FAILURE)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/server-side/svnpubsub/README.txt b/tools/server-side/svnpubsub/README.txt
new file mode 100644
index 0000000..ad4975e
--- /dev/null
+++ b/tools/server-side/svnpubsub/README.txt
@@ -0,0 +1,24 @@
+Installation instructions:
+
+1. Set up an svnpubsub service.
+
+ This directory should be checked out to /usr/local/svnpubsub (or /opt/svnpubsub
+ on Debian).
+
+ There are init scripts for several OSes in the rc.d/ directory; add them
+ to your OS boot process in the usual way for your OS. (For example, via
+ rc.conf(5) or update-rc.d(8).)
+
+2. Run "commit-hook.py $REPOS $REV" from your post-commit hook.
+
+ (As of 1.7, these are the same ordered arguments the post-commit hook
+ itself receives, so you can just symlink commit-hook.py as hooks/post-commit
+ hook if you don't need any other hooks to run in the server process. (This
+ isn't as insane as it sounds --- post-commit email hooks could also feed off
+ svnpubsub, and thus not be run within the committing server thread, but on
+ any other process or box that listens to the svnpubsub stream!))
+
+3. Set up svnpubsub clients.
+
+ (eg svnwcsub.py, svnpubsub/client.py,
+ 'curl -sN http://${hostname}:2069/commits')
diff --git a/tools/server-side/svnpubsub/commit-hook.py b/tools/server-side/svnpubsub/commit-hook.py
new file mode 100755
index 0000000..4a1a3f3
--- /dev/null
+++ b/tools/server-side/svnpubsub/commit-hook.py
@@ -0,0 +1,93 @@
+#!/usr/local/bin/python
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+SVNLOOK="/usr/local/svn-install/current/bin/svnlook"
+#SVNLOOK="/usr/local/bin/svnlook"
+
+HOST="127.0.0.1"
+PORT=2069
+
+import sys
+import subprocess
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+import urllib2
+
+def svncmd(cmd):
+ return subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
+
+def svncmd_uuid(repo):
+ cmd = "%s uuid %s" % (SVNLOOK, repo)
+ p = svncmd(cmd)
+ return p.stdout.read().strip()
+
+def svncmd_info(repo, revision):
+ cmd = "%s info -r %s %s" % (SVNLOOK, revision, repo)
+ p = svncmd(cmd)
+ data = p.stdout.read().split("\n")
+ #print data
+ return {'author': data[0].strip(),
+ 'date': data[1].strip(),
+ 'log': "\n".join(data[3:]).strip()}
+
+def svncmd_changed(repo, revision):
+ cmd = "%s changed -r %s %s" % (SVNLOOK, revision, repo)
+ p = svncmd(cmd)
+ changed = {}
+ while True:
+ line = p.stdout.readline()
+ if not line:
+ break
+ line = line.strip()
+ (flags, filename) = (line[0:3], line[4:])
+ changed[filename] = {'flags': flags}
+ return changed
+
+def do_put(body):
+ opener = urllib2.build_opener(urllib2.HTTPHandler)
+ request = urllib2.Request("http://%s:%d/commits" %(HOST, PORT), data=body)
+ request.add_header('Content-Type', 'application/json')
+ request.get_method = lambda: 'PUT'
+ url = opener.open(request)
+
+
+def main(repo, revision):
+ revision = revision.lstrip('r')
+ i = svncmd_info(repo, revision)
+ data = {'type': 'svn',
+ 'format': 1,
+ 'id': int(revision),
+ 'changed': {},
+ 'repository': svncmd_uuid(repo),
+ 'committer': i['author'],
+ 'log': i['log'],
+ 'date': i['date'],
+ }
+ data['changed'].update(svncmd_changed(repo, revision))
+ body = json.dumps(data)
+ do_put(body)
+
+if __name__ == "__main__":
+ if len(sys.argv) not in (3, 4):
+ sys.stderr.write("invalid args\n")
+ sys.exit(0)
+
+ main(*sys.argv[1:3])
diff --git a/tools/server-side/svnpubsub/daemonize.py b/tools/server-side/svnpubsub/daemonize.py
new file mode 100644
index 0000000..8b85258
--- /dev/null
+++ b/tools/server-side/svnpubsub/daemonize.py
@@ -0,0 +1,272 @@
+# ---------------------------------------------------------------------------
+#
+# Copyright (c) 2005, Greg Stein
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ---------------------------------------------------------------------------
+#
+# This software lives at:
+# http://gstein.googlecode.com/svn/trunk/python/daemonize.py
+#
+
+import os
+import signal
+import sys
+import time
+
+
+# possible return values from Daemon.daemonize()
+DAEMON_RUNNING = 'The daemon is running'
+DAEMON_NOT_RUNNING = 'The daemon is not running'
+DAEMON_COMPLETE = 'The daemon has completed its operations'
+DAEMON_STARTED = 'The daemon has been started'
+
+
+class Daemon(object):
+
+ def __init__(self, logfile, pidfile):
+ self.logfile = logfile
+ self.pidfile = pidfile
+
+ def foreground(self):
+ "Run in the foreground."
+ ### we should probably create a pidfile. other systems may try to detect
+ ### the pidfile to see if this "daemon" is running.
+ self.setup()
+ self.run()
+ ### remove the pidfile
+
+ def daemonize_exit(self):
+ try:
+ result = self.daemonize()
+ except (ChildFailed, DaemonFailed) as e:
+ # duplicate the exit code
+ sys.exit(e.code)
+ except (ChildTerminatedAbnormally, ChildForkFailed,
+ DaemonTerminatedAbnormally, DaemonForkFailed) as e:
+ sys.stderr.write('ERROR: %s\n' % e)
+ sys.exit(1)
+ except ChildResumedIncorrectly:
+ sys.stderr.write('ERROR: continued after receiving unknown signal.\n')
+ sys.exit(1)
+
+ if result == DAEMON_STARTED or result == DAEMON_COMPLETE:
+ sys.exit(0)
+ elif result == DAEMON_NOT_RUNNING:
+ sys.stderr.write('ERROR: the daemon exited with a success code '
+ 'without signalling its startup.\n')
+ sys.exit(1)
+
+ # in original process. daemon is up and running. we're done.
+
+ def daemonize(self):
+ # fork off a child that can detach itself from this process.
+ try:
+ pid = os.fork()
+ except OSError as e:
+ raise ChildForkFailed(e.errno, e.strerror)
+
+ if pid > 0:
+ # we're in the parent. let's wait for the child to finish setting
+ # things up -- on our exit, we want to ensure the child is accepting
+ # connections.
+ cpid, status = os.waitpid(pid, 0)
+ assert pid == cpid
+ if os.WIFEXITED(status):
+ code = os.WEXITSTATUS(status)
+ if code:
+ raise ChildFailed(code)
+ return DAEMON_RUNNING
+
+ # the child did not exit cleanly.
+ raise ChildTerminatedAbnormally(status)
+
+ # we're in the child.
+
+ # decouple from the parent process
+ os.chdir('/')
+ os.umask(0)
+ os.setsid()
+
+ # remember this pid so the second child can signal it.
+ thispid = os.getpid()
+
+ # register a signal handler so the SIGUSR1 doesn't stop the process.
+ # this object will also record whether if got signalled.
+ daemon_accepting = SignalCatcher(signal.SIGUSR1)
+
+ # if the daemon process exits before sending SIGUSR1, then we need to see
+ # the problem. trap SIGCHLD with a SignalCatcher.
+ daemon_exit = SignalCatcher(signal.SIGCHLD)
+
+ # perform the second fork
+ try:
+ pid = os.fork()
+ except OSError as e:
+ raise DaemonForkFailed(e.errno, e.strerror)
+
+ if pid > 0:
+ # in the parent.
+
+ # we want to wait for the daemon to signal that it has created and
+ # bound the socket, and is (thus) ready for connections. if the
+ # daemon improperly exits before serving, we'll see SIGCHLD and the
+ # .pause will return.
+ ### we should add a timeout to this. allow an optional parameter to
+ ### specify the timeout, in case it takes a long time to start up.
+ signal.pause()
+
+ if daemon_exit.signalled:
+ # reap the daemon process, getting its exit code. bubble it up.
+ cpid, status = os.waitpid(pid, 0)
+ assert pid == cpid
+ if os.WIFEXITED(status):
+ code = os.WEXITSTATUS(status)
+ if code:
+ raise DaemonFailed(code)
+ return DAEMON_NOT_RUNNING
+
+ # the daemon did not exit cleanly.
+ raise DaemonTerminatedAbnormally(status)
+
+ if daemon_accepting.signalled:
+ # the daemon is up and running, so save the pid and return success.
+ if self.pidfile:
+ # Be wary of symlink attacks
+ try:
+ os.remove(self.pidfile)
+ except OSError:
+ pass
+ fd = os.open(self.pidfile, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0444)
+ os.write(fd, '%d\n' % pid)
+ os.close(fd)
+ return DAEMON_STARTED
+
+ # some other signal popped us out of the pause. the daemon might not
+ # be running.
+ raise ChildResumedIncorrectly()
+
+ # we're a deamon now. get rid of the final remnants of the parent.
+ # start by restoring default signal handlers
+ signal.signal(signal.SIGUSR1, signal.SIG_DFL)
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ sys.stdout.flush()
+ sys.stderr.flush()
+ si = open('/dev/null', 'r')
+ so = open(self.logfile, 'a+')
+ se = open(self.logfile, 'a+', 0) # unbuffered
+ os.dup2(si.fileno(), sys.stdin.fileno())
+ os.dup2(so.fileno(), sys.stdout.fileno())
+ os.dup2(se.fileno(), sys.stderr.fileno())
+ # note: we could not inline the open() calls. after the fileno() completed,
+ # the file would be closed, making the fileno invalid. gotta hold them
+ # open until now:
+ si.close()
+ so.close()
+ se.close()
+
+ # TEST: don't release the parent immediately. the whole parent stack
+ # should pause along with this sleep.
+ #time.sleep(10)
+
+ # everything is set up. call the initialization function.
+ self.setup()
+
+ # sleep for one second before signalling. we want to make sure the
+ # parent has called signal.pause()
+ ### we should think of a better wait around the race condition.
+ time.sleep(1)
+
+ # okay. the daemon is ready. signal the parent to tell it we're set.
+ os.kill(thispid, signal.SIGUSR1)
+
+ # start the daemon now.
+ self.run()
+
+ # The daemon is shutting down, so toss the pidfile.
+ try:
+ os.remove(self.pidfile)
+ except OSError:
+ pass
+
+ return DAEMON_COMPLETE
+
+ def setup(self):
+ raise NotImplementedError
+
+ def run(self):
+ raise NotImplementedError
+
+
+class SignalCatcher(object):
+ def __init__(self, signum):
+ self.signalled = False
+ signal.signal(signum, self.sig_handler)
+
+ def sig_handler(self, signum, frame):
+ self.signalled = True
+
+
+class ChildTerminatedAbnormally(Exception):
+ "The child process terminated abnormally."
+ def __init__(self, status):
+ Exception.__init__(self, status)
+ self.status = status
+ def __str__(self):
+ return 'child terminated abnormally (0x%04x)' % self.status
+
+class ChildFailed(Exception):
+ "The child process exited with a failure code."
+ def __init__(self, code):
+ Exception.__init__(self, code)
+ self.code = code
+ def __str__(self):
+ return 'child failed with exit code %d' % self.code
+
+class ChildForkFailed(Exception):
+ "The child process could not be forked."
+ def __init__(self, errno, strerror):
+ Exception.__init__(self, errno, strerror)
+ self.errno = errno
+ self.strerror = strerror
+ def __str__(self):
+ return 'child fork failed with error %d (%s)' % self.args
+
+class ChildResumedIncorrectly(Exception):
+ "The child resumed its operation incorrectly."
+
+class DaemonTerminatedAbnormally(Exception):
+ "The daemon process terminated abnormally."
+ def __init__(self, status):
+ Exception.__init__(self, status)
+ self.status = status
+ def __str__(self):
+ return 'daemon terminated abnormally (0x%04x)' % self.status
+
+class DaemonFailed(Exception):
+ "The daemon process exited with a failure code."
+ def __init__(self, code):
+ Exception.__init__(self, code)
+ self.code = code
+ def __str__(self):
+ return 'daemon failed with exit code %d' % self.code
+
+class DaemonForkFailed(Exception):
+ "The daemon process could not be forked."
+ def __init__(self, errno, strerror):
+ Exception.__init__(self, errno, strerror)
+ self.errno = errno
+ self.strerror = strerror
+ def __str__(self):
+ return 'daemon fork failed with error %d (%s)' % self.args
diff --git a/tools/server-side/svnpubsub/irkerbridge.py b/tools/server-side/svnpubsub/irkerbridge.py
new file mode 100755
index 0000000..04b7ee2
--- /dev/null
+++ b/tools/server-side/svnpubsub/irkerbridge.py
@@ -0,0 +1,322 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# IrkerBridge - Bridge an SvnPubSub stream to Irker.
+
+# Example:
+# irkerbridge.py --daemon --pidfile pid --logfile log config
+#
+# For detailed option help use:
+# irkerbridge.py --help
+
+# It expects a config file that has the following parameters:
+# streams=url
+# Space separated list of URLs to streams.
+# This option should only be in the DEFAULT section, is ignored in
+# all other sections.
+# irker=hostname:port
+# The hostname/port combination of the irker daemon. If port is
+# omitted it defaults to 6659. Irker is connected to over UDP.
+# match=What to use to decide if the commit should be sent to irker.
+# It consists of the repository UUID followed by a slash and a glob pattern.
+# The UUID may be replaced by a * to match all UUIDs. The glob pattern will
+# be matched against all of the dirs_changed. Both the UUID and the glob
+# pattern must match to send the message to irker.
+# to=url
+# Space separated list of URLs (any URL that Irker will accept) to
+# send the resulting message to. At current Irker only supports IRC.
+# template=string
+# A string to use to format the output. The string is a Python
+# string Template. The following variables are available:
+# $committer, $id, $date, $repository, $log, $log_firstline,
+# $log_firstparagraph, $dirs_changed, $dirs_count, $dirs_count_s,
+# $subdirs_count, $subdirs_count_s, $dirs_root
+# Most of them should be self explanatory. $dirs_count is the number of
+# entries in $dirs_changed, $dirs_count_s is a friendly string version,
+# $dirs_root is the common root of all the $dirs_changed, $subdirs_count
+# is the number of subdirs under the $dirs_root that changed,
+# $subdirs_root_s is a friendly string version. $log_firstparagraph cuts
+# the log message at the first blank line and replaces newlines with spaces.
+#
+# Within the config file you have sections. Any configuration option
+# missing from a given section is found in the [DEFAULT] section.
+#
+# Section names are arbitrary names that mean nothing to the bridge. Each
+# section other than the [DEFAULT] section consists of a configuration that
+# may match and send a message to irker to deliver. All matching sections
+# will generate a message.
+#
+# Interpolation of values within the config file is allowed by including
+# %(name)s within a value. For example I can reference the UUID of a repo
+# repeatedly by doing:
+# [DEFAULT]
+# ASF_REPO=13f79535-47bb-0310-9956-ffa450edef68
+#
+# [#commits]
+# match=%(ASF_REPO)s/
+#
+# You can HUP the process to reload the config file without restarting the
+# process. However, you cannot change the streams it is listening to without
+# restarting the process.
+#
+# TODO: Logging in a better way.
+
+# Messages longer than this will be truncated and ... added to the end such
+# that the resulting message is no longer than this:
+MAX_PRIVMSG = 400
+
+import os
+import sys
+import posixpath
+import socket
+import json
+import urlparse
+import optparse
+import ConfigParser
+import traceback
+import signal
+import re
+import fnmatch
+from string import Template
+
+# Packages that come with svnpubsub
+import svnpubsub.client
+import daemonize
+
+class Daemon(daemonize.Daemon):
+ def __init__(self, logfile, pidfile, bdec):
+ daemonize.Daemon.__init__(self, logfile, pidfile)
+
+ self.bdec = bdec
+
+ def setup(self):
+ # There is no setup which the parent needs to wait for.
+ pass
+
+ def run(self):
+ print 'irkerbridge started, pid=%d' % (os.getpid())
+
+ mc = svnpubsub.client.MultiClient(self.bdec.urls,
+ self.bdec.commit,
+ self.bdec.event)
+ mc.run_forever()
+
+
+class BigDoEverythingClass(object):
+ def __init__(self, config, options):
+ self.config = config
+ self.options = options
+ self.urls = config.get_value('streams').split()
+
+ def locate_matching_configs(self, commit):
+ result = [ ]
+ for section in self.config.sections():
+ match = self.config.get(section, "match").split('/', 1)
+ if len(match) < 2:
+ # No slash so assume all paths
+ match.append('*')
+ match_uuid, match_path = match
+ if commit.repository == match_uuid or match_uuid == "*":
+ for path in commit.changed:
+ if fnmatch.fnmatch(path, match_path):
+ result.append(section)
+ break
+ return result
+
+ def _generate_dirs_changed(self, commit):
+ if hasattr(commit, 'dirs_changed') or not hasattr(commit, 'changed'):
+ return
+
+ dirs_changed = set()
+ for p in commit.changed:
+ if p[-1] == '/' and commit.changed[p]['flags'][1] == 'U':
+ # directory with property changes add the directory itself.
+ dirs_changed.add(p)
+ else:
+ # everything else add the parent of the path
+ # directories have a trailing slash so if it's present remove
+ # it before finding the parent. The result will be a directory
+ # so it needs a trailing slash
+ dirs_changed.add(posixpath.dirname(p.rstrip('/')) + '/')
+
+ commit.dirs_changed = dirs_changed
+ return
+
+ def fill_in_extra_args(self, commit):
+ # Set any empty members to the string "<null>"
+ v = vars(commit)
+ for k in v.keys():
+ if not v[k]:
+ v[k] = '<null>'
+
+ self._generate_dirs_changed(commit)
+ # Add entries to the commit object that are useful for
+ # formatting.
+ commit.log_firstline = commit.log.split("\n",1)[0]
+ commit.log_firstparagraph = re.split("\r?\n\r?\n",commit.log,1)[0]
+ commit.log_firstparagraph = re.sub("\r?\n"," ",commit.log_firstparagraph)
+ if commit.dirs_changed:
+ commit.dirs_root = posixpath.commonprefix(commit.dirs_changed)
+ if commit.dirs_root == '':
+ commit.dirs_root = '/'
+ commit.dirs_count = len(commit.dirs_changed)
+ if commit.dirs_count > 1:
+ commit.dirs_count_s = " (%d dirs)" %(commit.dirs_count)
+ else:
+ commit.dirs_count_s = ""
+
+ commit.subdirs_count = commit.dirs_count
+ if commit.dirs_root in commit.dirs_changed:
+ commit.subdirs_count -= 1
+ if commit.subdirs_count >= 1:
+ commit.subdirs_count_s = " + %d subdirs" % (commit.subdirs_count)
+ else:
+ commit.subdirs_count_s = ""
+
+ def _send(self, irker, msg):
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ irker_list = irker.split(':')
+ if len(irker_list) < 2:
+ irker_list.append(6659)
+ json_msg = json.dumps(msg)
+ sock.sendto(json_msg, (irker_list[0],int(irker_list[1])))
+ if self.options.verbose:
+ print "SENT: %s to %s" % (json_msg, irker)
+
+ def join_all(self):
+ # Like self.commit(), but ignores self.config.get(section, "template").
+ for section in self.config.sections():
+ irker = self.config.get(section, "irker")
+ to_list = self.config.get(section, "to").split()
+ if not irker or not to_list:
+ continue
+ for to in to_list:
+ msg = {'to': to, 'privmsg': ''}
+ self._send(irker, msg)
+
+ def commit(self, url, commit):
+ if self.options.verbose:
+ print "RECV: from %s" % url
+ print json.dumps(vars(commit), indent=2)
+
+ try:
+ config_sections = self.locate_matching_configs(commit)
+ if len(config_sections) > 0:
+ self.fill_in_extra_args(commit)
+ for section in config_sections:
+ irker = self.config.get(section, "irker")
+ to_list = self.config.get(section, "to").split()
+ template = self.config.get(section, "template")
+ if not irker or not to_list or not template:
+ continue
+ privmsg = Template(template).safe_substitute(vars(commit))
+ if len(privmsg) > MAX_PRIVMSG:
+ privmsg = privmsg[:MAX_PRIVMSG-3] + '...'
+ for to in to_list:
+ msg = {'to': to, 'privmsg': privmsg}
+ self._send(irker, msg)
+
+ except:
+ print "Unexpected error:"
+ traceback.print_exc()
+ sys.stdout.flush()
+ raise
+
+ def event(self, url, event_name, event_arg):
+ if self.options.verbose or event_name != "ping":
+ print 'EVENT: %s from %s' % (event_name, url)
+ sys.stdout.flush()
+
+
+
+class ReloadableConfig(ConfigParser.SafeConfigParser):
+ def __init__(self, fname):
+ ConfigParser.SafeConfigParser.__init__(self)
+
+ self.fname = fname
+ self.read(fname)
+
+ signal.signal(signal.SIGHUP, self.hangup)
+
+ def hangup(self, signalnum, frame):
+ self.reload()
+
+ def reload(self):
+ print "RELOAD: config file: %s" % self.fname
+ sys.stdout.flush()
+
+ # Delete everything. Just re-reading would overlay, and would not
+ # remove sections/options. Note that [DEFAULT] will not be removed.
+ for section in self.sections():
+ self.remove_section(section)
+
+ # Get rid of [DEFAULT]
+ self.remove_section(ConfigParser.DEFAULTSECT)
+
+ # Now re-read the configuration file.
+ self.read(self.fname)
+
+ def get_value(self, which):
+ return self.get(ConfigParser.DEFAULTSECT, which)
+
+
+def main(args):
+ parser = optparse.OptionParser(
+ description='An SvnPubSub client that bridges the data to irker.',
+ usage='Usage: %prog [options] CONFIG_FILE',
+ )
+ parser.add_option('--logfile',
+ help='filename for logging')
+ parser.add_option('--verbose', action='store_true',
+ help="enable verbose logging")
+ parser.add_option('--pidfile',
+ help="the process' PID will be written to this file")
+ parser.add_option('--daemon', action='store_true',
+ help='run as a background daemon')
+
+ options, extra = parser.parse_args(args)
+
+ if len(extra) != 1:
+ parser.error('CONFIG_FILE is requried')
+ config_file = os.path.abspath(extra[0])
+
+ logfile, pidfile = None, None
+ if options.daemon:
+ if options.logfile:
+ logfile = os.path.abspath(options.logfile)
+ else:
+ parser.error('LOGFILE is required when running as a daemon')
+
+ if options.pidfile:
+ pidfile = os.path.abspath(options.pidfile)
+ else:
+ parser.error('PIDFILE is required when running as a daemon')
+
+
+ config = ReloadableConfig(config_file)
+ bdec = BigDoEverythingClass(config, options)
+
+ d = Daemon(logfile, pidfile, bdec)
+ if options.daemon:
+ d.daemonize_exit()
+ else:
+ d.foreground()
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/tools/server-side/svnpubsub/rc.d/svnpubsub b/tools/server-side/svnpubsub/rc.d/svnpubsub
new file mode 120000
index 0000000..b05e35e
--- /dev/null
+++ b/tools/server-side/svnpubsub/rc.d/svnpubsub
@@ -0,0 +1 @@
+svnpubsub.freebsd \ No newline at end of file
diff --git a/tools/server-side/svnpubsub/rc.d/svnpubsub.debian b/tools/server-side/svnpubsub/rc.d/svnpubsub.debian
new file mode 100755
index 0000000..c61057d
--- /dev/null
+++ b/tools/server-side/svnpubsub/rc.d/svnpubsub.debian
@@ -0,0 +1,62 @@
+#!/bin/bash
+### BEGIN INIT INFO
+# Provides: svnpubsub
+# Required-Start: $remote_fs
+# Required-Stop: $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: SvnPubSub
+# Description: start SvnPubSub daemon
+### END INIT INFO
+
+. /lib/init/vars.sh
+. /lib/lsb/init-functions
+
+svnpubsub_user=${svnpubsub_user-"daemon"}
+svnpubsub_group=${svnpubsub_group-"daemon"}
+svnpubsub_reactor=${svnpubsub_reactor-"poll"}
+svnpubsub_pidfile=${svnpubsub_pidfile-"/var/run/svnpubsub.pid"}
+pidfile="${svnpubsub_pidfile}"
+
+TWSITD_CMD="/usr/bin/twistd -y /opt/svnpubsub/svnpubsub.tac \
+ --logfile=/var/log/svnpubsub/svnpubsub.log \
+ --pidfile=${pidfile} \
+ --uid=${svnpubsub_user} --gid=${svnpubsub_user} \
+ -r${svnpubsub_reactor}"
+
+RETVAL=0
+
+start() {
+ echo "Starting SvnPubSub Server: "
+ $TWSITD_CMD
+ RETVAL=$?
+ [ $RETVAL -eq 0 ] && echo "ok" || echo "failed"
+ return $RETVAL
+}
+
+stop() {
+ echo "Stopping SvnPubSub Server: "
+ THE_PID=`cat ${pidfile}`
+ kill $THE_PID
+ RETVAL=$?
+ [ $RETVAL -eq 0 ] && echo "ok" || echo "failed"
+ return $RETVAL
+}
+
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ restart)
+ stop
+ start
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart}"
+ exit 1
+esac
+
+exit $RETVAL
diff --git a/tools/server-side/svnpubsub/rc.d/svnpubsub.freebsd b/tools/server-side/svnpubsub/rc.d/svnpubsub.freebsd
new file mode 100755
index 0000000..71fc8c8
--- /dev/null
+++ b/tools/server-side/svnpubsub/rc.d/svnpubsub.freebsd
@@ -0,0 +1,37 @@
+#!/bin/sh
+#
+# PROVIDE: svnpubsub
+# REQUIRE: DAEMON
+# KEYWORD: shutdown
+
+. /etc/rc.subr
+
+name="svnpubsub"
+rcvar=`set_rcvar`
+
+load_rc_config $name
+
+#
+# DO NOT CHANGE THESE DEFAULT VALUES HERE
+# SET THEM IN THE /etc/rc.conf FILE
+#
+svnpubsub_enable=${svnpubsub_enable-"NO"}
+svnpubsub_user=${svnpubsub_user-"svn"}
+svnpubsub_group=${svnpubsub_group-"svn"}
+svnpubsub_reactor=${svnpubsub_reactor-"poll"}
+svnpubsub_pidfile=${svnpubsub_pidfile-"/var/run/svnpubsub/svnpubsub.pid"}
+svnpubsub_cmd_int=${svnpubsub_cmd_int-"python"}
+pidfile="${svnpubsub_pidfile}"
+
+export PYTHON_EGG_CACHE="/home/svn/.python-eggs"
+
+command="/usr/local/bin/twistd"
+command_interpreter="/usr/local/bin/${svnpubsub_cmd_int}"
+command_args="-y /usr/local/svnpubsub/svnpubsub.tac \
+ --logfile=/var/log/vc/svnpubsub.log \
+ --pidfile=${pidfile} \
+ --uid=${svnpubsub_user} --gid=${svnpubsub_user} \
+ -r${svnpubsub_reactor}"
+
+
+run_rc_command "$1"
diff --git a/tools/server-side/svnpubsub/rc.d/svnpubsub.solaris b/tools/server-side/svnpubsub/rc.d/svnpubsub.solaris
new file mode 100755
index 0000000..3a9cf9f
--- /dev/null
+++ b/tools/server-side/svnpubsub/rc.d/svnpubsub.solaris
@@ -0,0 +1,53 @@
+#!/usr/bin/bash
+#
+# a dumb init script for twistd on solaris. cus like, writing XML for SMF is f'ing lame.
+#
+
+svnpubsub_user=${svnpubsub_user-"daemon"}
+svnpubsub_group=${svnpubsub_group-"daemon"}
+svnpubsub_reactor=${svnpubsub_reactor-"poll"}
+svnpubsub_pidfile=${svnpubsub_pidfile-"/var/run/svnpubsub/svnpubsub.pid"}
+pidfile="${svnpubsub_pidfile}"
+
+TWSITD_CMD="/opt/local/bin//twistd -y /usr/local/svnpubsub/svnpubsub.tac \
+ --logfile=/x1/log/svnpubsub.log \
+ --pidfile=${pidfile} \
+ --uid=${svnpubsub_user} --gid=${svnpubsub_user} \
+ -r${svnpubsub_reactor}"
+
+RETVAL=0
+
+start() {
+ echo "Starting SvnPubSub Server: "
+ $TWSITD_CMD
+ RETVAL=$?
+ [ $RETVAL -eq 0 ] && echo "ok" || echo "failed"
+ return $RETVAL
+}
+
+stop() {
+ echo "Stopping SvnPubSub Server: "
+ THE_PID=`cat ${pidfile}`
+ kill $THE_PID
+ RETVAL=$?
+ [ $RETVAL -eq 0 ] && echo "ok" || echo "failed"
+ return $RETVAL
+}
+
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ restart)
+ stop
+ start
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart}"
+ exit 1
+esac
+
+exit $RETVAL
diff --git a/tools/server-side/svnpubsub/rc.d/svnwcsub b/tools/server-side/svnpubsub/rc.d/svnwcsub
new file mode 120000
index 0000000..310fcbe
--- /dev/null
+++ b/tools/server-side/svnpubsub/rc.d/svnwcsub
@@ -0,0 +1 @@
+svnwcsub.freebsd \ No newline at end of file
diff --git a/tools/server-side/svnpubsub/rc.d/svnwcsub.debian b/tools/server-side/svnpubsub/rc.d/svnwcsub.debian
new file mode 100755
index 0000000..caf5511
--- /dev/null
+++ b/tools/server-side/svnpubsub/rc.d/svnwcsub.debian
@@ -0,0 +1,65 @@
+#!/bin/bash
+### BEGIN INIT INFO
+# Provides: svnwcsub
+# Required-Start: $remote_fs
+# Required-Stop: $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: SvnWcSub
+# Description: start SvnWcSub daemon
+### END INIT INFO
+
+. /lib/init/vars.sh
+. /lib/lsb/init-functions
+
+svnwcsub_user=${svnwcsub_user-"svnwc"}
+svnwcsub_group=${svnwcsub_group-"svnwc"}
+svnwcsub_pidfile=${svnwcsub_pidfile-"/var/run/svnwcsub.pid"}
+svnwcsub_config=${svnwcsub_config-"/etc/svnwcsub.conf"}
+svnwcsub_logfile=${svnwcsub_logfile-"/var/log/svnwcsub/svnwcsub.log"}
+pidfile="${svnwcsub_pidfile}"
+
+SVNWCSUB_CMD="/opt/svnpubsub/svnwcsub.py \
+ --daemon \
+ --logfile=${svnwcsub_logfile} \
+ --pidfile=${pidfile} \
+ --uid=${svnwcsub_user} --gid=${svnwcsub_group} \
+ --umask=002 \
+ ${svnwcsub_config} "
+
+RETVAL=0
+
+start() {
+ echo "Starting SvnWcSub Server: "
+ $SVNWCSUB_CMD
+ RETVAL=$?
+ [ $RETVAL -eq 0 ] && echo "ok" || echo "failed"
+ return $RETVAL
+}
+
+stop() {
+ echo "Stopping SvnWcSub Server: "
+ THE_PID=`cat ${pidfile}`
+ kill $THE_PID
+ RETVAL=$?
+ [ $RETVAL -eq 0 ] && echo "ok" || echo "failed"
+ return $RETVAL
+}
+
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ restart)
+ stop
+ start
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart}"
+ exit 1
+esac
+
+exit $RETVAL
diff --git a/tools/server-side/svnpubsub/rc.d/svnwcsub.freebsd b/tools/server-side/svnpubsub/rc.d/svnwcsub.freebsd
new file mode 100755
index 0000000..58ad386
--- /dev/null
+++ b/tools/server-side/svnpubsub/rc.d/svnwcsub.freebsd
@@ -0,0 +1,39 @@
+#!/bin/sh
+#
+# PROVIDE: svnwcsub
+# REQUIRE: DAEMON
+# KEYWORD: shutdown
+
+. /etc/rc.subr
+
+name="svnwcsub"
+rcvar=`set_rcvar`
+
+load_rc_config $name
+
+#
+# DO NOT CHANGE THESE DEFAULT VALUES HERE
+# SET THEM IN THE /etc/rc.conf FILE
+#
+svnwcsub_enable=${svnwcsub_enable-"NO"}
+svnwcsub_user=${svnwcsub_user-"svnwc"}
+svnwcsub_group=${svnwcsub_group-"svnwc"}
+svnwcsub_pidfile=${svnwcsub_pidfile-"/var/run/svnwcsub/svnwcsub.pid"}
+svnwcsub_env="PYTHON_EGG_CACHE"
+svnwcsub_cmd_int=${svnwcsub_cmd_int-"python"}
+svnwcsub_config=${svnwcsub_config-"/etc/svnwcsub.conf"}
+svnwcsub_logfile=${svnwcsub_logfile-"/var/log/svnwcsub/svnwcsub.log"}
+pidfile="${svnwcsub_pidfile}"
+
+export PYTHON_EGG_CACHE="/var/run/svnwcsub"
+
+command="/usr/local/svnpubsub/svnwcsub.py"
+command_interpreter="/usr/local/bin/${svnwcsub_cmd_int}"
+command_args="--daemon \
+ --logfile=${svnwcsub_logfile} \
+ --pidfile=${pidfile} \
+ --uid=${svnwcsub_user} --gid=${svnwcsub_group} \
+ --umask=002 \
+ ${svnwcsub_config}"
+
+run_rc_command "$1"
diff --git a/tools/server-side/svnpubsub/rc.d/svnwcsub.solaris b/tools/server-side/svnpubsub/rc.d/svnwcsub.solaris
new file mode 100755
index 0000000..bd0c2bd
--- /dev/null
+++ b/tools/server-side/svnpubsub/rc.d/svnwcsub.solaris
@@ -0,0 +1,56 @@
+#!/usr/bin/bash
+#
+# a dumb init script for twistd on solaris. cus like, writing XML for SMF is f'ing lame.
+#
+
+svnwcsub_user=${svnwcsub_user-"svnwc"}
+svnwcsub_group=${svnwcsub_group-"other"}
+svnwcsub_pidfile=${svnwcsub_pidfile-"/var/run/svnwcsub/svnwcsub.pid"}
+svnwcsub_config=${svnwcsub_config-"/etc/svnwcsub.conf"}
+svnwcsub_logfile=${svnwcsub_logfile-"/x1/log/svnwcsub/svnwcsub.log"}
+pidfile="${svnwcsub_pidfile}"
+
+SVNWCSUB_CMD="/usr/local/svnpubsub/svnwcsub.py \
+ --daemon \
+ --logfile=${svnwcsub_logfile} \
+ --pidfile=${pidfile} \
+ --uid=${svnwcsub_user} --gid=${svnwcsub_group} \
+ --umask=002 \
+ ${svnwcsub_config}"
+
+RETVAL=0
+
+start() {
+ echo "Starting SvnWcSub Server: "
+ $SVNWCSUB_CMD
+ RETVAL=$?
+ [ $RETVAL -eq 0 ] && echo "ok" || echo "failed"
+ return $RETVAL
+}
+
+stop() {
+ echo "Stopping SvnWcSub Server: "
+ THE_PID=`cat ${pidfile}`
+ kill $THE_PID
+ RETVAL=$?
+ [ $RETVAL -eq 0 ] && echo "ok" || echo "failed"
+ return $RETVAL
+}
+
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ restart)
+ stop
+ start
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart}"
+ exit 1
+esac
+
+exit $RETVAL
diff --git a/tools/server-side/svnpubsub/svnpubsub.tac b/tools/server-side/svnpubsub/svnpubsub.tac
new file mode 100644
index 0000000..574ad24
--- /dev/null
+++ b/tools/server-side/svnpubsub/svnpubsub.tac
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import sys
+import os
+from twisted.application import service, internet
+
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+
+from svnpubsub.server import svnpubsub_server
+
+application = service.Application("SvnPubSub")
+
+def get_service():
+ return internet.TCPServer(2069, svnpubsub_server())
+
+service = get_service()
+service.setServiceParent(application)
diff --git a/tools/server-side/svnpubsub/svnpubsub/__init__.py b/tools/server-side/svnpubsub/svnpubsub/__init__.py
new file mode 100644
index 0000000..f50e195
--- /dev/null
+++ b/tools/server-side/svnpubsub/svnpubsub/__init__.py
@@ -0,0 +1 @@
+# Turn svnpubsub/ into a package.
diff --git a/tools/server-side/svnpubsub/svnpubsub/client.py b/tools/server-side/svnpubsub/svnpubsub/client.py
new file mode 100644
index 0000000..c1631d6
--- /dev/null
+++ b/tools/server-side/svnpubsub/svnpubsub/client.py
@@ -0,0 +1,230 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Generic client for SvnPubSub
+#
+# ### usage...
+#
+#
+# EVENTS
+#
+# connected: a connection to the server has been opened (though not
+# necessarily established)
+# closed: the connection was closed. reconnect will be attempted.
+# error: an error closed the connection. reconnect will be attempted.
+# ping: the server has sent a keepalive
+# stale: no activity has been seen, so the connection will be closed
+# and reopened
+#
+
+import asyncore
+import asynchat
+import socket
+import functools
+import time
+import json
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
+
+# How long the polling loop should wait for activity before returning.
+TIMEOUT = 30.0
+
+# Always delay a bit when trying to reconnect. This is not precise, but sets
+# a minimum amount of delay. At the moment, there is no further backoff.
+RECONNECT_DELAY = 25.0
+
+# If we don't see anything from the server for this amount time, then we
+# will drop and reconnect. The TCP connection may have gone down without
+# us noticing it somehow.
+STALE_DELAY = 60.0
+
+
+class SvnpubsubClientException(Exception):
+ pass
+
+class Client(asynchat.async_chat):
+
+ def __init__(self, url, commit_callback, event_callback):
+ asynchat.async_chat.__init__(self)
+
+ self.last_activity = time.time()
+ self.ibuffer = []
+
+ self.url = url
+ parsed_url = urlparse.urlsplit(url)
+ if parsed_url.scheme != 'http':
+ raise ValueError("URL scheme must be http: '%s'" % url)
+ host = parsed_url.hostname
+ port = parsed_url.port
+ resource = parsed_url.path
+ if parsed_url.query:
+ resource += "?%s" % parsed_url.query
+ if parsed_url.fragment:
+ resource += "#%s" % parsed_url.fragment
+
+ self.event_callback = event_callback
+
+ self.parser = JSONRecordHandler(commit_callback, event_callback)
+
+ # Wait for the end of headers. Then we start parsing JSON.
+ self.set_terminator(b'\r\n\r\n')
+ self.skipping_headers = True
+
+ self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ self.connect((host, port))
+ except:
+ self.handle_error()
+ return
+
+ self.push(('GET %s HTTP/1.0\r\n\r\n' % resource).encode('ascii'))
+
+ def handle_connect(self):
+ self.event_callback('connected', None)
+
+ def handle_close(self):
+ self.event_callback('closed', None)
+ self.close()
+
+ def handle_error(self):
+ self.event_callback('error', None)
+ self.close()
+
+ def found_terminator(self):
+ if self.skipping_headers:
+ self.skipping_headers = False
+ # Each JSON record is terminated by a null character
+ self.set_terminator(b'\0')
+ else:
+ record = b"".join(self.ibuffer)
+ self.ibuffer = []
+ self.parser.feed(record.decode())
+
+ def collect_incoming_data(self, data):
+ # Remember the last time we saw activity
+ self.last_activity = time.time()
+
+ if not self.skipping_headers:
+ self.ibuffer.append(data)
+
+
+class JSONRecordHandler:
+ def __init__(self, commit_callback, event_callback):
+ self.commit_callback = commit_callback
+ self.event_callback = event_callback
+
+ def feed(self, record):
+ obj = json.loads(record)
+ if 'svnpubsub' in obj:
+ actual_version = obj['svnpubsub'].get('version')
+ EXPECTED_VERSION = 1
+ if actual_version != EXPECTED_VERSION:
+        raise SvnpubsubClientException("Unknown svnpubsub format: %r != %d"
+                                       % (actual_version, EXPECTED_VERSION))
+ self.event_callback('version', obj['svnpubsub']['version'])
+ elif 'commit' in obj:
+ commit = Commit(obj['commit'])
+ self.commit_callback(commit)
+ elif 'stillalive' in obj:
+ self.event_callback('ping', obj['stillalive'])
+
+
+class Commit(object):
+ def __init__(self, commit):
+ self.__dict__.update(commit)
+
+
+class MultiClient(object):
+ def __init__(self, urls, commit_callback, event_callback):
+ self.commit_callback = commit_callback
+ self.event_callback = event_callback
+
+ # No target time, as no work to do
+ self.target_time = 0
+ self.work_items = [ ]
+
+ for url in urls:
+ self._add_channel(url)
+
+ def _reconnect(self, url, event_name, event_arg):
+ if event_name == 'closed' or event_name == 'error':
+ # Stupid connection closed for some reason. Set up a reconnect. Note
+ # that it should have been removed from asyncore.socket_map already.
+ self._reconnect_later(url)
+
+ # Call the user's callback now.
+ self.event_callback(url, event_name, event_arg)
+
+ def _reconnect_later(self, url):
+ # Set up a work item to reconnect in a little while.
+ self.work_items.append(url)
+
+ # Only set a target if one has not been set yet. Otherwise, we could
+ # create a race condition of continually moving out towards the future
+ if not self.target_time:
+ self.target_time = time.time() + RECONNECT_DELAY
+
+ def _add_channel(self, url):
+ # Simply instantiating the client will install it into the global map
+ # for processing in the main event loop.
+ Client(url,
+ functools.partial(self.commit_callback, url),
+ functools.partial(self._reconnect, url))
+
+ def _check_stale(self):
+ now = time.time()
+ for client in asyncore.socket_map.values():
+ if client.last_activity + STALE_DELAY < now:
+ # Whoops. No activity in a while. Signal this fact, Close the
+ # Client, then have it reconnected later on.
+ self.event_callback(client.url, 'stale', client.last_activity)
+
+ # This should remove it from .socket_map.
+ client.close()
+
+ self._reconnect_later(client.url)
+
+ def _maybe_work(self):
+    # If we haven't reached the targeted time, or have no work to do,
+    # then fast-path exit
+ if time.time() < self.target_time or not self.work_items:
+ return
+
+ # We'll take care of all the work items, so no target for future work
+ self.target_time = 0
+
+ # Play a little dance just in case work gets added while we're
+ # currently working on stuff
+ work = self.work_items
+ self.work_items = [ ]
+
+ for url in work:
+ self._add_channel(url)
+
+ def run_forever(self):
+ while True:
+ if asyncore.socket_map:
+ asyncore.loop(timeout=TIMEOUT, count=1)
+ else:
+ time.sleep(TIMEOUT)
+
+ self._check_stale()
+ self._maybe_work()
diff --git a/tools/server-side/svnpubsub/svnpubsub/server.py b/tools/server-side/svnpubsub/svnpubsub/server.py
new file mode 100644
index 0000000..faee423
--- /dev/null
+++ b/tools/server-side/svnpubsub/svnpubsub/server.py
@@ -0,0 +1,241 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# SvnPubSub - Simple Push Notification of Subversion commits
+#
+# Based on the theory behind the Live Journal Atom Streaming Service:
+# <http://atom.services.livejournal.com/>
+#
+# Instead of using a complicated XMPP/AMPQ/JMS/super messaging service,
+# we have simple HTTP GETs and PUTs to get data in and out.
+#
+# Currently supports both XML and JSON serialization.
+#
+# Example Sub clients:
+# curl -sN http://127.0.0.1:2069/commits
+# curl -sN http://127.0.0.1:2069/commits/svn/*
+# curl -sN http://127.0.0.1:2069/commits/svn
+# curl -sN http://127.0.0.1:2069/commits/*/13f79535-47bb-0310-9956-ffa450edef68
+# curl -sN http://127.0.0.1:2069/commits/svn/13f79535-47bb-0310-9956-ffa450edef68
+#
+# URL is built into 2 parts:
+# /commits/${optional_type}/${optional_repository}
+#
+# If the type is included in the URL, you will only get commits of that type.
+# The type can be * and then you will receive commits of any type.
+#
+# If the repository is included in the URL, you will only receive
+# messages about that repository. The repository can be * and then you
+# will receive messages about all repositories.
+#
+# Example Pub clients:
+# curl -T revinfo.json -i http://127.0.0.1:2069/commits
+#
+# TODO:
+# - Add Real access controls (not just 127.0.0.1)
+# - Document PUT format
+# - Convert to twisted.python.log
+
+
+
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+import sys
+
+import twisted
+from twisted.internet import reactor
+from twisted.internet import defer
+from twisted.web import server
+from twisted.web import resource
+from twisted.python import log
+
+import time
+
+class Commit:
+ def __init__(self, r):
+ self.__dict__.update(r)
+ if not self.check_value('repository'):
+ raise ValueError('Invalid Repository Value')
+ if not self.check_value('type'):
+ raise ValueError('Invalid Type Value')
+ if not self.check_value('format'):
+ raise ValueError('Invalid Format Value')
+ if not self.check_value('id'):
+ raise ValueError('Invalid ID Value')
+
+ def check_value(self, k):
+ return hasattr(self, k) and self.__dict__[k]
+
+ def render_commit(self):
+ obj = {'commit': {}}
+ obj['commit'].update(self.__dict__)
+ return json.dumps(obj)
+
+ def render_log(self):
+ try:
+ paths_changed = " %d paths changed" % len(self.changed)
+ except:
+ paths_changed = ""
+ return "%s:%s repo '%s' id '%s'%s" % (self.type,
+ self.format,
+ self.repository,
+ self.id,
+ paths_changed)
+
+
+HEARTBEAT_TIME = 15
+
+class Client(object):
+ def __init__(self, pubsub, r, type, repository):
+ self.pubsub = pubsub
+ r.notifyFinish().addErrback(self.finished)
+ self.r = r
+ self.type = type
+ self.repository = repository
+ self.alive = True
+ log.msg("OPEN: %s:%d (%d clients online)"% (r.getClientIP(), r.client.port, pubsub.cc()+1))
+
+ def finished(self, reason):
+ self.alive = False
+ log.msg("CLOSE: %s:%d (%d clients online)"% (self.r.getClientIP(), self.r.client.port, self.pubsub.cc()))
+ try:
+ self.pubsub.remove(self)
+ except ValueError:
+ pass
+
+ def interested_in(self, commit):
+ if self.type and self.type != commit.type:
+ return False
+
+ if self.repository and self.repository != commit.repository:
+ return False
+
+ return True
+
+ def notify(self, data):
+ self.write(data)
+
+ def start(self):
+ self.write_start()
+ reactor.callLater(HEARTBEAT_TIME, self.heartbeat, None)
+
+ def heartbeat(self, args):
+ if self.alive:
+ self.write_heartbeat()
+ reactor.callLater(HEARTBEAT_TIME, self.heartbeat, None)
+
+ def write_data(self, data):
+ self.write(data + "\n\0")
+
+ """ "Data must not be unicode" is what the interfaces.ITransport says... grr. """
+ def write(self, input):
+ self.r.write(str(input))
+
+ def write_start(self):
+ self.r.setHeader('X-SVNPubSub-Version', '1')
+ self.r.setHeader('content-type', 'application/vnd.apache.vc-notify+json')
+ self.write('{"svnpubsub": {"version": 1}}\n\0')
+
+ def write_heartbeat(self):
+ self.write(json.dumps({"stillalive": time.time()}) + "\n\0")
+
+
+class SvnPubSub(resource.Resource):
+ isLeaf = True
+ clients = []
+
+ def cc(self):
+ return len(self.clients)
+
+ def remove(self, c):
+ self.clients.remove(c)
+
+ def render_GET(self, request):
+ log.msg("REQUEST: %s" % (request.uri))
+ request.setHeader('content-type', 'text/plain')
+
+ repository = None
+ type = None
+
+ uri = request.uri.split('/')
+ uri_len = len(uri)
+ if uri_len < 2 or uri_len > 4:
+ request.setResponseCode(400)
+ return "Invalid path\n"
+
+ if uri_len >= 3:
+ type = uri[2]
+
+ if uri_len == 4:
+ repository = uri[3]
+
+ # Convert wild card to None.
+ if type == '*':
+ type = None
+ if repository == '*':
+ repository = None
+
+ c = Client(self, request, type, repository)
+ self.clients.append(c)
+ c.start()
+ return twisted.web.server.NOT_DONE_YET
+
+ def notifyAll(self, commit):
+ data = commit.render_commit()
+
+ log.msg("COMMIT: %s (%d clients)" % (commit.render_log(), self.cc()))
+ for client in self.clients:
+ if client.interested_in(commit):
+ client.write_data(data)
+
+ def render_PUT(self, request):
+ request.setHeader('content-type', 'text/plain')
+ ip = request.getClientIP()
+ if ip != "127.0.0.1":
+ request.setResponseCode(401)
+ return "Access Denied"
+ input = request.content.read()
+ #import pdb;pdb.set_trace()
+ #print "input: %s" % (input)
+ try:
+ c = json.loads(input)
+ commit = Commit(c)
+ except ValueError as e:
+ request.setResponseCode(400)
+ log.msg("COMMIT: failed due to: %s" % str(e))
+ return str(e)
+ self.notifyAll(commit)
+ return "Ok"
+
+def svnpubsub_server():
+ root = resource.Resource()
+ s = SvnPubSub()
+ root.putChild("commits", s)
+ return server.Site(root)
+
+if __name__ == "__main__":
+ log.startLogging(sys.stdout)
+ # Port 2069 "HTTP Event Port", whatever, sounds good to me
+ reactor.listenTCP(2069, svnpubsub_server())
+ reactor.run()
+
diff --git a/tools/server-side/svnpubsub/svntweet.py b/tools/server-side/svnpubsub/svntweet.py
new file mode 100755
index 0000000..ed426bd
--- /dev/null
+++ b/tools/server-side/svnpubsub/svntweet.py
@@ -0,0 +1,237 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# SvnTweet - Subscribe to a SvnPubSub stream, and Twitter about it!
+#
+# Example:
+# svntweet.py my-config.json
+#
+# With my-config.json containing stream paths and the twitter auth info:
+# {"stream": "http://svn.apache.org:2069/commits",
+# "username": "asfcommits",
+# "password": "MyLuggageComboIs1234"}
+#
+#
+#
+
+import threading
+import sys
+import os
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+from twisted.internet import defer, reactor, task, threads
+from twisted.python import failure, log
+from twisted.web.client import HTTPClientFactory, HTTPPageDownloader
+
+from urlparse import urlparse
+import time
+import posixpath
+
+sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "twitty-twister", "lib"))
+try:
+ import twitter
+except:
+ print "Get a copy of twitty-twister from <http://github.com/dustin/twitty-twister>"
+ sys.exit(-1)
+class Config(object):
+ def __init__(self, path):
+ self.path = path
+ self.mtime_path = 0
+ self.config = {}
+ self._load_config()
+
+ def _load_config(self):
+ mtime = os.path.getmtime(self.path)
+ if mtime != self.mtime_path:
+ fp = open(self.path, "rb")
+ self.mtime_path = mtime
+ self.config = json.loads(fp.read())
+
+class HTTPStream(HTTPClientFactory):
+ protocol = HTTPPageDownloader
+
+ def __init__(self, url):
+ HTTPClientFactory.__init__(self, url, method="GET", agent="SvnTweet/0.1.0")
+
+ def pageStart(self, partial):
+ pass
+
+ def pagePart(self, data):
+ pass
+
+ def pageEnd(self):
+ pass
+
+class Commit(object):
+ def __init__(self, commit):
+ self.__dict__.update(commit)
+
+class JSONRecordHandler:
+ def __init__(self, bdec):
+ self.bdec = bdec
+
+ def feed(self, record):
+ obj = json.loads(record)
+ if 'svnpubsub' in obj:
+ actual_version = obj['svnpubsub'].get('version')
+ EXPECTED_VERSION = 1
+ if actual_version != EXPECTED_VERSION:
+        raise ValueError("Unknown svnpubsub format: %r != %d"
+                         % (actual_version, EXPECTED_VERSION))
+ elif 'commit' in obj:
+ commit = Commit(obj['commit'])
+ if not hasattr(commit, 'type'):
+        raise ValueError("Commit object is missing type field.")
+      if not hasattr(commit, 'format'):
+        raise ValueError("Commit object is missing format field.")
+      if commit.type != 'svn' or commit.format != 1:
+        raise ValueError("Unexpected type and/or format: %s:%s"
+                         % (commit.type, commit.format))
+ self.bdec.commit(commit)
+ elif 'stillalive' in obj:
+ self.bdec.stillalive()
+
+class JSONHTTPStream(HTTPStream):
+ def __init__(self, url, bdec):
+ HTTPStream.__init__(self, url)
+ self.bdec = bdec
+ self.ibuffer = []
+ self.parser = JSONRecordHandler(bdec)
+
+ def pageStart(self, partial):
+ self.bdec.pageStart()
+
+ def pagePart(self, data):
+ eor = data.find("\0")
+ if eor >= 0:
+ self.ibuffer.append(data[0:eor])
+ self.parser.feed(''.join(self.ibuffer))
+ self.ibuffer = [data[eor+1:]]
+ else:
+ self.ibuffer.append(data)
+
+def connectTo(url, bdec):
+ u = urlparse(url)
+ port = u.port
+ if not port:
+ port = 80
+ s = JSONHTTPStream(url, bdec)
+  conn = reactor.connectTCP(u.hostname, port, s)
+ return [s, conn]
+
+
+CHECKBEAT_TIME = 90
+
+class BigDoEverythingClasss(object):
+ def __init__(self, config):
+ self.c = config
+ self.c._load_config()
+ self.url = str(self.c.config.get('stream'))
+ self.failures = 0
+ self.alive = time.time()
+ self.checker = task.LoopingCall(self._checkalive)
+ self.transport = None
+ self.stream = None
+ self._restartStream()
+ self.watch = []
+ self.twit = twitter.Twitter(self.c.config.get('username'), self.c.config.get('password'))
+
+ def pageStart(self):
+ log.msg("Stream Connection Established")
+ self.failures = 0
+
+ def _restartStream(self):
+ (self.stream, self.transport) = connectTo(self.url, self)
+ self.stream.deferred.addBoth(self.streamDead)
+ self.alive = time.time()
+ self.checker.start(CHECKBEAT_TIME)
+
+ def _checkalive(self):
+ n = time.time()
+ if n - self.alive > CHECKBEAT_TIME:
+ log.msg("Stream is dead, reconnecting")
+ self.transport.disconnect()
+
+ def stillalive(self):
+ self.alive = time.time()
+
+ def streamDead(self, v):
+ BACKOFF_SECS = 5
+ BACKOFF_MAX = 60
+ self.checker.stop()
+
+ self.stream = None
+ self.failures += 1
+ backoff = min(self.failures * BACKOFF_SECS, BACKOFF_MAX)
+ log.msg("Stream disconnected, trying again in %d seconds.... %s" % (backoff, self.url))
+ reactor.callLater(backoff, self._restartStream)
+
+ def _normalize_path(self, path):
+ if path[0] != '/':
+ return "/" + path
+ return posixpath.abspath(path)
+
+ def tweet(self, msg):
+ log.msg("SEND TWEET: %s" % (msg))
+ self.twit.update(msg).addCallback(self.tweet_done).addErrback(log.msg)
+
+ def tweet_done(self, x):
+ log.msg("TWEET: Success!")
+
+ def build_tweet(self, commit):
+ maxlen = 144
+ left = maxlen
+ paths = map(self._normalize_path, commit.changed)
+ if not len(paths):
+ return None
+ path = posixpath.commonprefix(paths)
+ if path[0:1] == '/' and len(path) > 1:
+ path = path[1:]
+
+ #TODO: allow URL to be configurable.
+ link = " - http://svn.apache.org/r%d" % (commit.id)
+ left -= len(link)
+ msg = "r%d in %s by %s: " % (commit.id, path, commit.committer)
+ left -= len(msg)
+ if left > 3:
+ msg += commit.log[0:left]
+ msg += link
+ return msg
+
+ def commit(self, commit):
+ log.msg("COMMIT r%d (%d paths)" % (commit.id, len(commit.changed)))
+ msg = self.build_tweet(commit)
+ if msg:
+ self.tweet(msg)
+ #print "Common Prefix: %s" % (pre)
+
+def main(config_file):
+ c = Config(config_file)
+ big = BigDoEverythingClasss(c)
+ reactor.run()
+
+if __name__ == "__main__":
+ if len(sys.argv) != 2:
+ print "invalid args, read source code"
+ sys.exit(0)
+ log.startLogging(sys.stdout)
+ main(sys.argv[1])
diff --git a/tools/server-side/svnpubsub/svnwcsub.conf.example b/tools/server-side/svnpubsub/svnwcsub.conf.example
new file mode 100644
index 0000000..644a3b7
--- /dev/null
+++ b/tools/server-side/svnpubsub/svnwcsub.conf.example
@@ -0,0 +1,16 @@
+[DEFAULT]
+svnbin: /usr/local/bin/svn
+streams: http://svn.example.org:2069/commits/svn
+# hook: /usr/bin/true
+
+## The values below are used by ConfigParser's interpolation syntax.
+## See http://docs.python.org/library/configparser
+SOME_REPOS: svn://svn.example.org/repos/chaos
+
+[env]
+HOME: /home/svn
+LANG: en_US.UTF-8
+
+[track]
+/usr/local/foo/prod: %(SOME_REPOS)s/foo/production
+/usr/local/foo/dev: %(SOME_REPOS)s/foo/trunk
diff --git a/tools/server-side/svnpubsub/svnwcsub.py b/tools/server-side/svnpubsub/svnwcsub.py
new file mode 100755
index 0000000..366df7c
--- /dev/null
+++ b/tools/server-side/svnpubsub/svnwcsub.py
@@ -0,0 +1,546 @@
+#!/usr/bin/env python
+# encoding: UTF-8
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# SvnWcSub - Subscribe to a SvnPubSub stream, and keep a set of working copy
+# paths in sync
+#
+# Example:
+# svnwcsub.py svnwcsub.conf
+#
+# On startup svnwcsub checks the working copy's path, runs a single svn update
+# and then watches for changes to that path.
+#
+# See svnwcsub.conf for more information on its contents.
+#
+
+# TODO:
+# - bulk update at startup time to avoid backlog warnings
+# - fold BDEC into Daemon
+# - fold WorkingCopy._get_match() into __init__
+# - remove wc_ready(). assume all WorkingCopy instances are usable.
+# place the instances into .watch at creation. the .update_applies()
+# just returns if the wc is disabled (eg. could not find wc dir)
+# - figure out way to avoid the ASF-specific PRODUCTION_RE_FILTER
+# (a base path exclusion list should work for the ASF)
+# - add support for SIGHUP to reread the config and reinitialize working copies
+# - joes will write documentation for svnpubsub as these items become fulfilled
+# - make LOGLEVEL configurable
+
+import errno
+import subprocess
+import threading
+import sys
+import os
+import re
+import posixpath
+try:
+ import ConfigParser
+except ImportError:
+ import configparser as ConfigParser
+import time
+import logging.handlers
+try:
+ import Queue
+except ImportError:
+ import queue as Queue
+import optparse
+import functools
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
+
+import daemonize
+import svnpubsub.client
+
# check_output() is only available in Python 2.7. Allow us to run with
# earlier versions
try:
    check_output = subprocess.check_output
except AttributeError:
    def check_output(args, env):  # note: we only use these two args
        """Minimal substitute for subprocess.check_output() on Python < 2.7."""
        proc = subprocess.Popen(args, stdout=subprocess.PIPE, env=env)
        stdout, _ = proc.communicate()
        if proc.returncode:
            raise subprocess.CalledProcessError(proc.returncode, args)
        return stdout
+
assert hasattr(subprocess, 'check_call')
def check_call(*args, **kwds):
    """Wrapper around subprocess.check_call() that logs stderr upon failure."""
    assert 'stderr' not in kwds
    kwds.update(stderr=subprocess.PIPE)
    proc = subprocess.Popen(*args, **kwds)
    out, err = proc.communicate()
    if proc.returncode:
        if args:
            cmd = args[0]
        else:
            cmd = kwds.get('args', '(no command)')
        # TODO: log stdout too?
        logging.error('Command failed: returncode=%d command=%r stderr=%r',
                      proc.returncode, cmd, err)
        raise subprocess.CalledProcessError(proc.returncode, args)
    return proc.returncode  # is EXIT_OK
+
### note: this runs synchronously. within the current Twisted environment,
### it is called from ._get_match() which is run on a thread so it won't
### block the Twisted main loop.
def svn_info(svnbin, env, path):
    """Run 'svn info' on the target path, returning a dict of info data.

    SVNBIN is the path of the svn executable, ENV the environment to run
    it in, and PATH the working-copy path to inspect.  Each "Key: value"
    output line becomes one dict entry.
    """
    args = [svnbin, "info", "--non-interactive", "--", path]
    output = check_output(args, env=env)
    # On Python 3 check_output() returns bytes, and this file's import
    # shims indicate it may run there; decode before string processing.
    if not isinstance(output, str):
        output = output.decode('utf-8')
    output = output.strip()
    info = { }
    for line in output.split('\n'):
        idx = line.index(':')
        info[line[:idx]] = line[idx+1:].strip()
    return info
+
try:
    import glob
    glob.iglob
    def is_emptydir(path):
        """Return True iff directory PATH has no entries (dotfiles included)."""
        # ### If the directory contains only dotfile children, this will
        # ### readdir() the entire directory. But os.readdir() is not
        # ### exposed to us...
        for pattern in ('%s/*' % path, '%s/.*' % path):
            for _entry in glob.iglob(pattern):
                return False
        return True
except (ImportError, AttributeError):
    # Python <= 2.4
    def is_emptydir(path):
        """Return True iff directory PATH has no entries.

        Fallback: reads the entire directory list into memory.
        """
        return not os.listdir(path)
+
class WorkingCopy(object):
    """One tracked working copy: local PATH checked out from URL.

    Construction checks out the working copy if it is missing or empty,
    reads its repository-relative path and UUID via 'svn info', and then
    registers the instance with the BDEC through bdec.wc_ready().
    """

    def __init__(self, bdec, path, url):
        self.path = path
        self.url = url

        try:
            self.match, self.uuid = self._get_match(bdec.svnbin, bdec.env)
            bdec.wc_ready(self)
        except Exception:
            # Narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt are no longer swallowed here.
            logging.exception('problem with working copy: %s', path)

    def update_applies(self, uuid, path):
        """Return True if a commit to PATH in repository UUID affects this WC.

        The comparison is string-prefix based: PATH applies when it equals
        self.match, is a prefix of it, or has it as a prefix.
        """
        if self.uuid != uuid:
            return False

        path = str(path)
        if path == self.match:
            #print "ua: Simple match"
            # easy case. woo.
            return True
        if len(path) < len(self.match):
            # path is potentially a parent directory of match?
            #print "ua: parent check"
            if self.match[0:len(path)] == path:
                return True
        if len(path) > len(self.match):
            # path is potentially a sub directory of match
            #print "ua: sub dir check"
            if path[0:len(self.match)] == self.match:
                return True
        return False

    def _get_match(self, svnbin, env):
        """Return (repos-relative path, repository UUID) for this WC,
        checking it out first when .svn is absent or empty."""
        ### quick little hack to auto-checkout missing working copies
        dotsvn = os.path.join(self.path, ".svn")
        if not os.path.isdir(dotsvn) or is_emptydir(dotsvn):
            logging.info("autopopulate %s from %s" % (self.path, self.url))
            check_call([svnbin, 'co', '-q',
                        '--force',
                        '--non-interactive',
                        '--config-option',
                        'config:miscellany:use-commit-times=on',
                        '--', self.url, self.path],
                       env=env)

        # Fetch the info for matching dirs_changed against this WC
        info = svn_info(svnbin, env, self.path)
        root = info['Repository Root']
        url = info['URL']
        relpath = url[len(root):]  # also has leading '/'
        uuid = info['Repository UUID']
        return str(relpath), uuid
+
+
+PRODUCTION_RE_FILTER = re.compile("/websites/production/[^/]+/")
+
class BigDoEverythingClasss(object):
    """Central glue object: holds config values, the set of watched working
    copies, and the background worker, and routes incoming commits."""

    def __init__(self, config):
        self.svnbin = config.get_value('svnbin')
        self.env = config.get_env()
        self.tracking = config.get_track()
        self.hook = config.get_optional_value('hook')
        self.streams = config.get_value('streams').split()
        self.worker = BackgroundWorker(self.svnbin, self.env, self.hook)
        self.watch = [ ]

    def start(self):
        """Create all configured working copies."""
        for path, url in self.tracking.items():
            # working copies auto-register with the BDEC when they are ready.
            WorkingCopy(self, path, url)

    def wc_ready(self, wc):
        """Register WC as watchable and queue its initial (boot) update."""
        # called when a working copy object has its basic info/url,
        # Add it to our watchers, and trigger an svn update.
        logging.info("Watching WC at %s <-> %s" % (wc.path, wc.url))
        self.watch.append(wc)
        self.worker.add_work(OP_BOOT, wc)

    def _normalize_path(self, path):
        """Ensure PATH has a leading '/'; normalize already-absolute paths."""
        if path[0] != '/':
            return "/" + path
        return posixpath.abspath(path)

    def commit(self, url, commit):
        """Dispatch an incoming COMMIT (from stream URL) to every working
        copy it applies to."""
        if commit.type != 'svn' or commit.format != 1:
            logging.info("SKIP unknown commit format (%s.%d)",
                         commit.type, commit.format)
            return
        logging.info("COMMIT r%d (%d paths) from %s"
                     % (commit.id, len(commit.changed), url))

        # list() so this also works on Python 3, where map() returns an
        # iterator (len() and commonprefix() need a sequence).
        paths = list(map(self._normalize_path, commit.changed))
        if paths:
            pre = posixpath.commonprefix(paths)
            if pre == "/websites/":
                # special case for svnmucc "dynamic content" buildbot commits
                # just take the first production path to avoid updating all
                # cms working copies
                for p in paths:
                    m = PRODUCTION_RE_FILTER.match(p)
                    if m:
                        pre = m.group(0)
                        break

            wcs = [wc for wc in self.watch
                   if wc.update_applies(commit.repository, pre)]
            logging.info("Updating %d WC for r%d" % (len(wcs), commit.id))
            for wc in wcs:
                self.worker.add_work(OP_UPDATE, wc)
+
+
# Start logging warnings if the work backlog reaches this many items
BACKLOG_TOO_HIGH = 20
# Operation tags queued to the BackgroundWorker: initial sync at startup,
# a regular post-commit update, and a working-copy cleanup.
OP_BOOT = 'boot'
OP_UPDATE = 'update'
OP_CLEANUP = 'cleanup'
+
class BackgroundWorker(threading.Thread):
    """Daemon thread that serially runs svn update/cleanup jobs from a queue."""

    def __init__(self, svnbin, env, hook):
        threading.Thread.__init__(self)

        # The main thread/process should not wait for this thread to exit.
        ### compat with Python 2.5
        self.setDaemon(True)

        self.svnbin = svnbin    # path of the svn executable
        self.env = env          # environment dict for subprocesses
        self.hook = hook        # optional hook script path, or None
        self.q = Queue.Queue()  # holds (operation, WorkingCopy) items

        # Thread startup is deferred until the first work item arrives.
        # NOTE(review): read/written without a lock in add_work(); safe only
        # if add_work() is always called from a single thread -- confirm.
        self.has_started = False

    def run(self):
        """Worker loop: pop (operation, wc) items and dispatch them forever."""
        while True:
            # This will block until something arrives
            operation, wc = self.q.get()

            # Warn if the queue is too long.
            # (Note: the other thread might have added entries to self.q
            # after the .get() and before the .qsize().)
            qsize = self.q.qsize()+1
            if operation != OP_BOOT and qsize > BACKLOG_TOO_HIGH:
                # logging.warn() is a deprecated alias of warning().
                logging.warning('worker backlog is at %d', qsize)

            try:
                if operation == OP_UPDATE:
                    self._update(wc)
                elif operation == OP_BOOT:
                    self._update(wc, boot=True)
                elif operation == OP_CLEANUP:
                    self._cleanup(wc)
                else:
                    logging.critical('unknown operation: %s', operation)
            except Exception:
                # Narrowed from a bare "except:"; keep the worker alive no
                # matter what a job does, but let SystemExit and
                # KeyboardInterrupt propagate.
                logging.exception('exception in worker')

            # In case we ever want to .join() against the work queue
            self.q.task_done()

    def add_work(self, operation, wc):
        """Queue (OPERATION, WC); lazily start the thread on first use."""
        # Start the thread when work first arrives. Thread-start needs to
        # be delayed in case the process forks itself to become a daemon.
        if not self.has_started:
            self.start()
            self.has_started = True

        self.q.put((operation, wc))

    def _update(self, wc, boot=False):
        """Clean up, then 'svn switch' the working copy WC to its URL.

        BOOT selects which mode string is passed to the optional hook.
        """
        # For giggles, let's clean up the working copy in case something
        # happened earlier.
        self._cleanup(wc)

        logging.info("updating: %s", wc.path)

        ### we need to move some of these args into the config. these are
        ### still specific to the ASF setup.
        args = [self.svnbin, 'switch',
                '--quiet',
                '--non-interactive',
                '--trust-server-cert',
                '--ignore-externals',
                '--config-option',
                'config:miscellany:use-commit-times=on',
                '--',
                wc.url,
                wc.path]
        check_call(args, env=self.env)

        ### check the loglevel before running 'svn info'?
        info = svn_info(self.svnbin, self.env, wc.path)
        logging.info("updated: %s now at r%s", wc.path, info['Revision'])

        ## Run the hook
        if self.hook:
            hook_mode = ['post-update', 'boot'][boot]
            logging.info('running hook: %s at revision %s due to %s',
                         wc.path, info['Revision'], hook_mode)
            args = [self.hook, hook_mode,
                    wc.path, info['Revision'], wc.url]
            check_call(args, env=self.env)

    def _cleanup(self, wc):
        "Run a cleanup on the specified working copy."

        ### we need to move some of these args into the config. these are
        ### still specific to the ASF setup.
        args = [self.svnbin, 'cleanup',
                '--non-interactive',
                '--trust-server-cert',
                '--config-option',
                'config:miscellany:use-commit-times=on',
                wc.path]
        check_call(args, env=self.env)
+
+
class ReloadableConfig(ConfigParser.SafeConfigParser):
    """Configuration-file wrapper that can be re-read in place.

    Plain values live in [DEFAULT]; [env] holds environment overrides and
    [track] maps working-copy paths to repository URLs.
    """

    def __init__(self, fname):
        ConfigParser.SafeConfigParser.__init__(self)

        self.fname = fname
        self.read(fname)

        ### install a signal handler to set SHOULD_RELOAD. BDEC should
        ### poll this flag, and then adjust its internal structures after
        ### the reload.
        self.should_reload = False

    def reload(self):
        """Discard all sections and re-read the configuration file."""
        # Delete everything. Just re-reading would overlay, and would not
        # remove sections/options. Note that [DEFAULT] will not be removed.
        for section in self.sections():
            self.remove_section(section)

        # Now re-read the configuration file.
        # (was: self.read(fname) -- a NameError; 'fname' is only a
        # parameter of __init__)
        self.read(self.fname)

    def get_value(self, which):
        """Return option WHICH from the [DEFAULT] section."""
        return self.get(ConfigParser.DEFAULTSECT, which)

    def get_optional_value(self, which, default=None):
        """Return option WHICH from [DEFAULT], or DEFAULT when absent."""
        if self.has_option(ConfigParser.DEFAULTSECT, which):
            return self.get(ConfigParser.DEFAULTSECT, which)
        else:
            return default

    def get_env(self):
        """Return a copy of os.environ overlaid with the [env] section."""
        env = os.environ.copy()
        default_options = self.defaults().keys()
        for name, value in self.items('env'):
            # items() also yields the [DEFAULT] values; skip those.
            if name not in default_options:
                env[name] = value
        return env

    def get_track(self):
        "Return the {PATH: URL} dictionary of working copies to track."
        # items() includes [DEFAULT] entries; strip them out.
        track = dict(self.items('track'))
        for name in self.defaults().keys():
            del track[name]
        return track

    def optionxform(self, option):
        # Do not lowercase the option name.
        return str(option)
+
+
class Daemon(daemonize.Daemon):
    """Daemon wrapper that runs the BDEC and the SvnPubSub client loop."""

    def __init__(self, logfile, pidfile, umask, bdec):
        daemonize.Daemon.__init__(self, logfile, pidfile)

        self.umask = umask
        self.bdec = bdec

    def setup(self):
        # There is no setup which the parent needs to wait for.
        pass

    def run(self):
        """Main body of the (possibly daemonized) process."""
        logging.info('svnwcsub started, pid=%d', os.getpid())

        # Daemonized processes default to a umask of 000, while foreground
        # runs inherit the parent's value -- so only override when asked.
        if self.umask is not None:
            mask = int(self.umask, 8)
            os.umask(mask)
            logging.info('umask set to %03o', mask)

        # Start the BDEC (on the main thread), then hand control over to
        # the multi-stream SvnPubSub client.
        self.bdec.start()
        client = svnpubsub.client.MultiClient(self.bdec.streams,
                                              self.bdec.commit,
                                              self._event)
        client.run_forever()

    def _event(self, url, event_name, event_arg):
        """Log one non-commit event received from stream URL."""
        if event_name == 'error':
            logging.exception('from %s', url)
        elif event_name == 'ping':
            logging.debug('ping from %s', url)
        else:
            logging.info('"%s" from %s', event_name, url)
+
+
def prepare_logging(logfile):
    "Log to the specified file, or to stdout if None."

    if logfile:
        # Rotate logs daily, keeping 7 days worth.
        handler = logging.handlers.TimedRotatingFileHandler(
            logfile, when='midnight', backupCount=7)
    else:
        handler = logging.StreamHandler(sys.stdout)

    # Timestamp every record, e.g. "2012-01-01 01:01:01 [INFO] message".
    handler.setFormatter(logging.Formatter(
        '%(asctime)s [%(levelname)s] %(message)s', '%Y-%m-%d %H:%M:%S'))

    # Apply the handler to the root logger.
    root_logger = logging.getLogger()
    root_logger.addHandler(handler)

    ### use logging.INFO for now. switch to cmdline option or a config?
    root_logger.setLevel(logging.INFO)
+
+
def handle_options(options):
    """Apply parsed command-line OPTIONS: logging, pidfile, gid/uid switch."""
    # Set up the logging, then process the rest of the options.
    prepare_logging(options.logfile)

    # In daemon mode, we let the daemonize module handle the pidfile.
    # Otherwise, we should write this (foreground) PID into the file.
    if options.pidfile and not options.daemon:
        pid = os.getpid()
        # Be wary of symlink attacks
        try:
            os.remove(options.pidfile)
        except OSError:
            pass
        # 0o444: read-only for everyone.  (The old "0444" literal form is
        # a syntax error on Python 3.)
        fd = os.open(options.pidfile, os.O_WRONLY | os.O_CREAT | os.O_EXCL,
                     0o444)
        # os.write() requires bytes on Python 3.
        os.write(fd, ('%d\n' % pid).encode('ascii'))
        os.close(fd)
        logging.info('pid %d written to %s', pid, options.pidfile)

    if options.gid:
        try:
            gid = int(options.gid)
        except ValueError:
            import grp
            gid = grp.getgrnam(options.gid)[2]
        logging.info('setting gid %d', gid)
        os.setgid(gid)

    if options.uid:
        try:
            uid = int(options.uid)
        except ValueError:
            import pwd
            uid = pwd.getpwnam(options.uid)[2]
        logging.info('setting uid %d', uid)
        os.setuid(uid)
+
+
def main(args):
    """Parse ARGS, apply the options, and run svnwcsub (daemon or foreground)."""
    parser = optparse.OptionParser(
        description='An SvnPubSub client to keep working copies synchronized '
                    'with a repository.',
        usage='Usage: %prog [options] CONFIG_FILE',
        )
    option_specs = [
        (('--logfile',), {'help': 'filename for logging'}),
        (('--pidfile',), {'help': "the process' PID will be written to this file"}),
        (('--uid',), {'help': 'switch to this UID before running'}),
        (('--gid',), {'help': 'switch to this GID before running'}),
        (('--umask',), {'help': 'set this (octal) umask before running'}),
        (('--daemon',), {'action': 'store_true',
                         'help': 'run as a background daemon'}),
        ]
    for names, kwds in option_specs:
        parser.add_option(*names, **kwds)

    options, extra = parser.parse_args(args)

    if len(extra) != 1:
        parser.error('CONFIG_FILE is required')
    config_file = extra[0]

    if options.daemon and not options.logfile:
        parser.error('LOGFILE is required when running as a daemon')
    if options.daemon and not options.pidfile:
        parser.error('PIDFILE is required when running as a daemon')

    # Process any provided options.
    handle_options(options)

    bdec = BigDoEverythingClasss(ReloadableConfig(config_file))

    # We manage the logfile ourselves (along with possible rotation). The
    # daemon process can just drop stdout/stderr into /dev/null.
    daemon = Daemon('/dev/null', options.pidfile, options.umask, bdec)
    if options.daemon:
        # Daemonize the process and call sys.exit() with appropriate code
        daemon.daemonize_exit()
    else:
        # Just run in the foreground (the default)
        daemon.foreground()
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/tools/server-side/svnpubsub/testserver.py b/tools/server-side/svnpubsub/testserver.py
new file mode 100755
index 0000000..8966a95
--- /dev/null
+++ b/tools/server-side/svnpubsub/testserver.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# A simple test server for responding in different ways to SvnPubSub clients.
+# This avoids the complexity of the Twisted framework in order to direct
+# various (abnormal) conditions at the client.
+#
+# ### usage...
+#
+
+import sys
+import BaseHTTPServer
+
+
# Port the test server listens on.
PORT = 2069

# Canned response: an SvnPubSub version header followed by one commit
# record, each record terminated by "\n\0".
TEST_BODY = '{"svnpubsub": {"version": 1}}\n\0{"commit": {"type": "svn", "format": 1, "repository": "12345678-1234-1234-1234-123456789012", "id": "1234", "committer": "johndoe", "date": "2012-01-01 01:01:01 +0000 (Sun, 01 Jan 2012)", "log": "Frob the ganoozle with the snookish", "changed": {"one/path/alpha": {"flags": "U "}, "some/other/directory/": {"flags": "_U "}}}}\n\0'

# NOTE(review): not referenced anywhere in this file -- presumably a knob
# for future abnormal-condition tests; confirm before removing.
SEND_KEEPALIVE = True
+
+
class TestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Answers every GET with the canned TEST_BODY stream."""

    def do_GET(self):
        self.send_response(200)
        # Announce the exact body length and keep the connection open.
        for name, value in (('Content-Length', str(len(TEST_BODY))),
                            ('Connection', 'keep-alive')):
            self.send_header(name, value)
        self.end_headers()
        self.wfile.write(TEST_BODY)
+
+
if __name__ == '__main__':
    # Listen on all interfaces; runs until interrupted.
    server = BaseHTTPServer.HTTPServer(('', PORT), TestHandler)
    sys.stderr.write('Now listening on port %d...\n' % (PORT,))
    server.serve_forever()
diff --git a/tools/server-side/svnpubsub/watcher.py b/tools/server-side/svnpubsub/watcher.py
new file mode 100755
index 0000000..340b100
--- /dev/null
+++ b/tools/server-side/svnpubsub/watcher.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Watch for events from SvnPubSub and print them to stdout
+#
+#
+
+import sys
+import pprint
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
+
+import svnpubsub.client
+
+
+def _commit(url, commit):
+ print('COMMIT: from %s' % url)
+ pprint.pprint(vars(commit), indent=2)
+
+
+def _event(url, event_name, event_arg):
+ if event_arg:
+ print('EVENT: from %s "%s" "%s"' % (url, event_name, event_arg))
+ else:
+ print('EVENT: from %s "%s"' % (url, event_name))
+
+
def main(urls):
    """Subscribe to every URL and relay commits/events to stdout forever."""
    svnpubsub.client.MultiClient(urls, _commit, _event).run_forever()
+
+
+if __name__ == "__main__":
+ if len(sys.argv) < 2:
+ print("usage: watcher.py URL [URL...]")
+ sys.exit(0)
+ main(sys.argv[1:])