author     Lorry <lorry@roadtrain.codethink.co.uk>  2012-08-22 14:49:51 +0100
committer  Lorry <lorry@roadtrain.codethink.co.uk>  2012-08-22 14:49:51 +0100
commit     a498da43c7fdb9f24b73680c02a4a3588cc62d9a (patch)
tree       daf8119dae1749b5165b68033a1b23a7375ce9ce /hgext
Tarball conversion
Diffstat (limited to 'hgext')
-rw-r--r--  hgext/__init__.py  1
-rw-r--r--  hgext/acl.py  316
-rw-r--r--  hgext/bugzilla.py  915
-rw-r--r--  hgext/children.py  50
-rw-r--r--  hgext/churn.py  199
-rw-r--r--  hgext/color.py  503
-rw-r--r--  hgext/convert/__init__.py  370
-rw-r--r--  hgext/convert/bzr.py  285
-rw-r--r--  hgext/convert/common.py  445
-rw-r--r--  hgext/convert/convcmd.py  470
-rw-r--r--  hgext/convert/cvs.py  272
-rw-r--r--  hgext/convert/cvsps.py  853
-rw-r--r--  hgext/convert/darcs.py  200
-rw-r--r--  hgext/convert/filemap.py  391
-rw-r--r--  hgext/convert/git.py  217
-rw-r--r--  hgext/convert/gnuarch.py  338
-rw-r--r--  hgext/convert/hg.py  395
-rw-r--r--  hgext/convert/monotone.py  360
-rw-r--r--  hgext/convert/p4.py  203
-rw-r--r--  hgext/convert/subversion.py  1251
-rw-r--r--  hgext/convert/transport.py  128
-rw-r--r--  hgext/eol.py  349
-rw-r--r--  hgext/extdiff.py  331
-rw-r--r--  hgext/factotum.py  120
-rw-r--r--  hgext/fetch.py  158
-rw-r--r--  hgext/gpg.py  289
-rw-r--r--  hgext/graphlog.py  54
-rw-r--r--  hgext/hgcia.py  277
-rw-r--r--  hgext/hgk.py  352
-rw-r--r--  hgext/highlight/__init__.py  64
-rw-r--r--  hgext/highlight/highlight.py  61
-rw-r--r--  hgext/histedit.py  715
-rw-r--r--  hgext/inotify/__init__.py  93
-rw-r--r--  hgext/inotify/client.py  172
-rw-r--r--  hgext/inotify/common.py  53
-rw-r--r--  hgext/inotify/linux/__init__.py  44
-rw-r--r--  hgext/inotify/linux/_inotify.c  649
-rw-r--r--  hgext/inotify/linux/watcher.py  335
-rw-r--r--  hgext/inotify/linuxserver.py  444
-rw-r--r--  hgext/inotify/server.py  492
-rw-r--r--  hgext/interhg.py  83
-rw-r--r--  hgext/keyword.py  730
-rw-r--r--  hgext/largefiles/CONTRIBUTORS  4
-rw-r--r--  hgext/largefiles/__init__.py  102
-rw-r--r--  hgext/largefiles/basestore.py  195
-rw-r--r--  hgext/largefiles/lfcommands.py  549
-rw-r--r--  hgext/largefiles/lfutil.py  467
-rw-r--r--  hgext/largefiles/localstore.py  82
-rw-r--r--  hgext/largefiles/overrides.py  1080
-rw-r--r--  hgext/largefiles/proto.py  173
-rw-r--r--  hgext/largefiles/remotestore.py  110
-rw-r--r--  hgext/largefiles/reposetup.py  475
-rw-r--r--  hgext/largefiles/uisetup.py  167
-rw-r--r--  hgext/largefiles/wirestore.py  37
-rw-r--r--  hgext/mq.py  3597
-rw-r--r--  hgext/notify.py  382
-rw-r--r--  hgext/pager.py  140
-rw-r--r--  hgext/patchbomb.py  558
-rw-r--r--  hgext/progress.py  295
-rw-r--r--  hgext/purge.py  110
-rw-r--r--  hgext/rebase.py  708
-rw-r--r--  hgext/record.py  666
-rw-r--r--  hgext/relink.py  184
-rw-r--r--  hgext/schemes.py  101
-rw-r--r--  hgext/share.py  75
-rw-r--r--  hgext/transplant.py  676
-rw-r--r--  hgext/win32mbcs.py  167
-rw-r--r--  hgext/win32text.py  172
-rw-r--r--  hgext/zeroconf/Zeroconf.py  1582
-rw-r--r--  hgext/zeroconf/__init__.py  188
70 files changed, 27069 insertions, 0 deletions
diff --git a/hgext/__init__.py b/hgext/__init__.py
new file mode 100644
index 0000000..fdffa2a
--- /dev/null
+++ b/hgext/__init__.py
@@ -0,0 +1 @@
+# placeholder
diff --git a/hgext/acl.py b/hgext/acl.py
new file mode 100644
index 0000000..2bf41aa
--- /dev/null
+++ b/hgext/acl.py
@@ -0,0 +1,316 @@
+# acl.py - changeset access control for mercurial
+#
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''hooks for controlling repository access
+
+This hook makes it possible to allow or deny write access to given
+branches and paths of a repository when receiving incoming changesets
+via pretxnchangegroup and pretxncommit.
+
+The authorization is matched based on the local user name on the
+system where the hook runs, and not the committer of the original
+changeset (since the latter is merely informative).
+
+The acl hook is best used along with a restricted shell like hgsh,
+preventing authenticating users from doing anything other than pushing
+or pulling. The hook is not safe to use if users have interactive
+shell access, as they can then disable the hook. Nor is it safe if
+remote users share an account, because then there is no way to
+distinguish them.
+
+The order in which access checks are performed is:
+
+1) Deny list for branches (section ``acl.deny.branches``)
+2) Allow list for branches (section ``acl.allow.branches``)
+3) Deny list for paths (section ``acl.deny``)
+4) Allow list for paths (section ``acl.allow``)
+
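+As a schematic sketch (mirroring the hook code at the bottom of this
+file, not a verbatim excerpt), a changeset is rejected as soon as a
+deny list matches or an allow list fails to match::
+
+  if denybranches and denybranches(branch): abort()        # 1
+  if allowbranches and not allowbranches(branch): abort()  # 2
+  for f in changed_files:
+      if deny and deny(f): abort()                         # 3
+      if allow and not allow(f): abort()                   # 4
+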
+The allow and deny sections take key-value pairs.
+
+Branch-based Access Control
+---------------------------
+
+Use the ``acl.deny.branches`` and ``acl.allow.branches`` sections to
+have branch-based access control. Keys in these sections can be
+either:
+
+- a branch name, or
+- an asterisk, to match any branch;
+
+The corresponding values can be either:
+
+- a comma-separated list containing users and groups, or
+- an asterisk, to match anyone;
+
+You can add the "!" prefix to a user or group name to invert the sense
+of the match.
+
+Path-based Access Control
+-------------------------
+
+Use the ``acl.deny`` and ``acl.allow`` sections to have path-based
+access control. Keys in these sections accept a subtree pattern (with
+a glob syntax by default). The corresponding values follow the same
+syntax as the other sections above.
+
+Groups
+------
+
+Group names must be prefixed with an ``@`` symbol. Specifying a group
+name has the same effect as specifying all the users in that group.
+
+You can define group members in the ``acl.groups`` section.
+If a group name is not defined there, and Mercurial is running under
+a Unix-like system, the list of users will be taken from the OS.
+Otherwise, an exception will be raised.
+
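+For example, a hypothetical group definition might look like::
+
+  [acl.groups]
+  # illustrative membership only
+  designers = jack, jill
+  hg-denied = user6
+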
+Example Configuration
+---------------------
+
+::
+
+ [hooks]
+
+ # Use this if you want to check access restrictions at commit time
+ pretxncommit.acl = python:hgext.acl.hook
+
+ # Use this if you want to check access restrictions for pull, push,
+ # bundle and serve.
+ pretxnchangegroup.acl = python:hgext.acl.hook
+
+ [acl]
+ # Allow or deny access for incoming changes only if their source is
+ # listed here, let them pass otherwise. Source is "serve" for all
+ # remote access (http or ssh), "push", "pull" or "bundle" when the
+ # related commands are run locally.
+ # Default: serve
+ sources = serve
+
+ [acl.deny.branches]
+
+  # Everyone is denied access to the frozen branch:
+ frozen-branch = *
+
+ # A bad user is denied on all branches:
+ * = bad-user
+
+ [acl.allow.branches]
+
+ # A few users are allowed on branch-a:
+ branch-a = user-1, user-2, user-3
+
+ # Only one user is allowed on branch-b:
+ branch-b = user-1
+
+ # The super user is allowed on any branch:
+ * = super-user
+
+ # Everyone is allowed on branch-for-tests:
+ branch-for-tests = *
+
+ [acl.deny]
+ # This list is checked first. If a match is found, acl.allow is not
+ # checked. All users are granted access if acl.deny is not present.
+ # Format for both lists: glob pattern = user, ..., @group, ...
+
+ # To match everyone, use an asterisk for the user:
+ # my/glob/pattern = *
+
+ # user6 will not have write access to any file:
+ ** = user6
+
+ # Group "hg-denied" will not have write access to any file:
+ ** = @hg-denied
+
+ # Nobody will be able to change "DONT-TOUCH-THIS.txt", despite
+ # everyone being able to change all other files. See below.
+ src/main/resources/DONT-TOUCH-THIS.txt = *
+
+ [acl.allow]
+ # if acl.allow is not present, all users are allowed by default
+ # empty acl.allow = no users allowed
+
+ # User "doc_writer" has write access to any file under the "docs"
+ # folder:
+ docs/** = doc_writer
+
+ # User "jack" and group "designers" have write access to any file
+ # under the "images" folder:
+ images/** = jack, @designers
+
+ # Everyone (except for "user6" and "@hg-denied" - see acl.deny above)
+ # will have write access to any file under the "resources" folder
+ # (except for 1 file. See acl.deny):
+ src/main/resources/** = *
+
+ .hgtags = release_engineer
+
+Examples using the "!" prefix
+.............................
+
+Suppose there's a branch that only a given user (or group) should be able to
+push to, and you don't want to restrict access to any other branch that may
+be created.
+
+The "!" prefix allows you to prevent anyone except a given user or group
+from pushing changesets to a given branch or path.
+
+In the examples below, we will:
+1) Deny access to branch "ring" to anyone but user "gollum"
+2) Deny access to branch "lake" to anyone but members of the group "hobbit"
+3) Deny access to a file to anyone but user "gollum"
+
+::
+
+ [acl.allow.branches]
+ # Empty
+
+ [acl.deny.branches]
+
+ # 1) only 'gollum' can commit to branch 'ring';
+ # 'gollum' and anyone else can still commit to any other branch.
+ ring = !gollum
+
+ # 2) only members of the group 'hobbit' can commit to branch 'lake';
+ # 'hobbit' members and anyone else can still commit to any other branch.
+ lake = !@hobbit
+
+ # You can also deny access based on file paths:
+
+ [acl.allow]
+ # Empty
+
+ [acl.deny]
+ # 3) only 'gollum' can change the file below;
+ # 'gollum' and anyone else can still change any other file.
+ /misty/mountains/cave/ring = !gollum
+
+'''
+
+from mercurial.i18n import _
+from mercurial import util, match
+import getpass, urllib
+
+testedwith = 'internal'
+
+def _getusers(ui, group):
+
+ # First, try to use group definition from section [acl.groups]
+ hgrcusers = ui.configlist('acl.groups', group)
+ if hgrcusers:
+ return hgrcusers
+
+ ui.debug('acl: "%s" not defined in [acl.groups]\n' % group)
+ # If no users found in group definition, get users from OS-level group
+ try:
+ return util.groupmembers(group)
+ except KeyError:
+ raise util.Abort(_("group '%s' is undefined") % group)
+
+def _usermatch(ui, user, usersorgroups):
+
+ if usersorgroups == '*':
+ return True
+
+ for ug in usersorgroups.replace(',', ' ').split():
+
+ if ug.startswith('!'):
+ # Test for excluded user or group. Format:
+ # if ug is a user name: !username
+ # if ug is a group name: !@groupname
+ ug = ug[1:]
+ if not ug.startswith('@') and user != ug \
+ or ug.startswith('@') and user not in _getusers(ui, ug[1:]):
+ return True
+
+ # Test for user or group. Format:
+ # if ug is a user name: username
+ # if ug is a group name: @groupname
+ elif user == ug \
+ or ug.startswith('@') and user in _getusers(ui, ug[1:]):
+ return True
+
+ return False
+
+def buildmatch(ui, repo, user, key):
+ '''return tuple of (match function, list enabled).'''
+ if not ui.has_section(key):
+ ui.debug('acl: %s not enabled\n' % key)
+ return None
+
+ pats = [pat for pat, users in ui.configitems(key)
+ if _usermatch(ui, user, users)]
+ ui.debug('acl: %s enabled, %d entries for user %s\n' %
+ (key, len(pats), user))
+
+ # Branch-based ACL
+ if not repo:
+ if pats:
+ # If there's an asterisk (meaning "any branch"), always return True;
+ # Otherwise, test if b is in pats
+ if '*' in pats:
+ return util.always
+ return lambda b: b in pats
+ return util.never
+
+ # Path-based ACL
+ if pats:
+ return match.match(repo.root, '', pats)
+ return util.never
+
+def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
+ if hooktype not in ['pretxnchangegroup', 'pretxncommit']:
+ raise util.Abort(_('config error - hook type "%s" cannot stop '
+ 'incoming changesets nor commits') % hooktype)
+ if (hooktype == 'pretxnchangegroup' and
+ source not in ui.config('acl', 'sources', 'serve').split()):
+ ui.debug('acl: changes have source "%s" - skipping\n' % source)
+ return
+
+ user = None
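+    # For remote access, hgweb passes a 'url' hook argument of the form
+    # 'remote:<scheme>:<host>:<url-quoted user>', so the authenticated
+    # user is recovered from the fourth field.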
+ if source == 'serve' and 'url' in kwargs:
+ url = kwargs['url'].split(':')
+ if url[0] == 'remote' and url[1].startswith('http'):
+ user = urllib.unquote(url[3])
+
+ if user is None:
+ user = getpass.getuser()
+
+ ui.debug('acl: checking access for user "%s"\n' % user)
+
+ cfg = ui.config('acl', 'config')
+ if cfg:
+ ui.readconfig(cfg, sections = ['acl.groups', 'acl.allow.branches',
+ 'acl.deny.branches', 'acl.allow', 'acl.deny'])
+
+ allowbranches = buildmatch(ui, None, user, 'acl.allow.branches')
+ denybranches = buildmatch(ui, None, user, 'acl.deny.branches')
+ allow = buildmatch(ui, repo, user, 'acl.allow')
+ deny = buildmatch(ui, repo, user, 'acl.deny')
+
+ for rev in xrange(repo[node], len(repo)):
+ ctx = repo[rev]
+ branch = ctx.branch()
+ if denybranches and denybranches(branch):
+ raise util.Abort(_('acl: user "%s" denied on branch "%s"'
+ ' (changeset "%s")')
+ % (user, branch, ctx))
+ if allowbranches and not allowbranches(branch):
+ raise util.Abort(_('acl: user "%s" not allowed on branch "%s"'
+ ' (changeset "%s")')
+ % (user, branch, ctx))
+ ui.debug('acl: branch access granted: "%s" on branch "%s"\n'
+ % (ctx, branch))
+
+ for f in ctx.files():
+ if deny and deny(f):
+ raise util.Abort(_('acl: user "%s" denied on "%s"'
+ ' (changeset "%s")') % (user, f, ctx))
+ if allow and not allow(f):
+ raise util.Abort(_('acl: user "%s" not allowed on "%s"'
+ ' (changeset "%s")') % (user, f, ctx))
+ ui.debug('acl: path access granted: "%s"\n' % ctx)
diff --git a/hgext/bugzilla.py b/hgext/bugzilla.py
new file mode 100644
index 0000000..42eef74
--- /dev/null
+++ b/hgext/bugzilla.py
@@ -0,0 +1,915 @@
+# bugzilla.py - bugzilla integration for mercurial
+#
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+# Copyright 2011-2 Jim Hague <jim.hague@acm.org>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''hooks for integrating with the Bugzilla bug tracker
+
+This hook extension adds comments on bugs in Bugzilla when changesets
+that refer to bugs by Bugzilla ID are seen. The comment is formatted using
+the Mercurial template mechanism.
+
+The bug references can optionally include an update for Bugzilla of the
+hours spent working on the bug. Bugs can also be marked fixed.
+
+Three basic modes of access to Bugzilla are provided:
+
+1. Access via the Bugzilla XMLRPC interface. Requires Bugzilla 3.4 or later.
+
+2. Check data via the Bugzilla XMLRPC interface and submit bug change
+ via email to Bugzilla email interface. Requires Bugzilla 3.4 or later.
+
+3. Writing directly to the Bugzilla database. Only Bugzilla installations
+ using MySQL are supported. Requires Python MySQLdb.
+
+Writing directly to the database is susceptible to schema changes, and
+relies on a Bugzilla contrib script to send out bug change
+notification emails. This script runs as the user running Mercurial,
+must be run on the host with the Bugzilla install, and requires
+permission to read Bugzilla configuration details and the necessary
+MySQL user and password to have full access rights to the Bugzilla
+database. For these reasons this access mode is now considered
+deprecated, and will not be updated for new Bugzilla versions going
+forward. Only adding comments is supported in this access mode.
+
+Access via XMLRPC needs a Bugzilla username and password to be specified
+in the configuration. Comments are added under that username. Since the
+configuration must be readable by all Mercurial users, it is recommended
+that the rights of that user are restricted in Bugzilla to the minimum
+necessary to add comments. Marking bugs fixed requires Bugzilla 4.0 or later.
+
+Access via XMLRPC/email uses XMLRPC to query Bugzilla, but sends
+email to the Bugzilla email interface to submit comments to bugs.
+The From: address in the email is set to the email address of the Mercurial
+user, so the comment appears to come from the Mercurial user. In the event
+that the Mercurial user email is not recognised by Bugzilla as a Bugzilla
+user, the email associated with the Bugzilla username used to log into
+Bugzilla is used instead as the source of the comment. Marking bugs fixed
+works on all supported Bugzilla versions.
+
+Configuration items common to all access modes:
+
+bugzilla.version
+  The access type to use. Values recognised are:
+
+ :``xmlrpc``: Bugzilla XMLRPC interface.
+ :``xmlrpc+email``: Bugzilla XMLRPC and email interfaces.
+ :``3.0``: MySQL access, Bugzilla 3.0 and later.
+ :``2.18``: MySQL access, Bugzilla 2.18 and up to but not
+ including 3.0.
+ :``2.16``: MySQL access, Bugzilla 2.16 and up to but not
+ including 2.18.
+
+bugzilla.regexp
+ Regular expression to match bug IDs for update in changeset commit message.
+ It must contain one "()" named group ``<ids>`` containing the bug
+ IDs separated by non-digit characters. It may also contain
+ a named group ``<hours>`` with a floating-point number giving the
+ hours worked on the bug. If no named groups are present, the first
+ "()" group is assumed to contain the bug IDs, and work time is not
+ updated. The default expression matches ``Bug 1234``, ``Bug no. 1234``,
+ ``Bug number 1234``, ``Bugs 1234,5678``, ``Bug 1234 and 5678`` and
+ variations thereof, followed by an hours number prefixed by ``h`` or
+ ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
+
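+For instance, a commit message like the following (an illustrative
+example) is matched by the default expression, adding the changeset
+comment to bugs 1234 and 5678 and logging 1.5 hours of work time::
+
+  Fix the frobnicator. Bugs 1234 and 5678, hours 1.5
+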
+bugzilla.fixregexp
+ Regular expression to match bug IDs for marking fixed in changeset
+  commit message. This must contain a "()" named group ``<ids>`` containing
+ the bug IDs separated by non-digit characters. It may also contain
+ a named group ``<hours>`` with a floating-point number giving the
+ hours worked on the bug. If no named groups are present, the first
+ "()" group is assumed to contain the bug IDs, and work time is not
+ updated. The default expression matches ``Fixes 1234``, ``Fixes bug 1234``,
+ ``Fixes bugs 1234,5678``, ``Fixes 1234 and 5678`` and
+ variations thereof, followed by an hours number prefixed by ``h`` or
+ ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
+
+bugzilla.fixstatus
+ The status to set a bug to when marking fixed. Default ``RESOLVED``.
+
+bugzilla.fixresolution
+ The resolution to set a bug to when marking fixed. Default ``FIXED``.
+
+bugzilla.style
+ The style file to use when formatting comments.
+
+bugzilla.template
+ Template to use when formatting comments. Overrides style if
+ specified. In addition to the usual Mercurial keywords, the
+ extension specifies:
+
+ :``{bug}``: The Bugzilla bug ID.
+ :``{root}``: The full pathname of the Mercurial repository.
+ :``{webroot}``: Stripped pathname of the Mercurial repository.
+ :``{hgweb}``: Base URL for browsing Mercurial repositories.
+
+ Default ``changeset {node|short} in repo {root} refers to bug
+ {bug}.\\ndetails:\\n\\t{desc|tabindent}``
+
+bugzilla.strip
+ The number of path separator characters to strip from the front of
+ the Mercurial repository path (``{root}`` in templates) to produce
+ ``{webroot}``. For example, a repository with ``{root}``
+ ``/var/local/my-project`` with a strip of 2 gives a value for
+ ``{webroot}`` of ``my-project``. Default 0.
+
+web.baseurl
+ Base URL for browsing Mercurial repositories. Referenced from
+ templates as ``{hgweb}``.
+
+Configuration items common to XMLRPC+email and MySQL access modes:
+
+bugzilla.usermap
+ Path of file containing Mercurial committer email to Bugzilla user email
+ mappings. If specified, the file should contain one mapping per
+ line::
+
+ committer = Bugzilla user
+
+ See also the ``[usermap]`` section.
+
+The ``[usermap]`` section is used to specify mappings of Mercurial
+committer email to Bugzilla user email. See also ``bugzilla.usermap``.
+Contains entries of the form ``committer = Bugzilla user``.
+
+XMLRPC access mode configuration:
+
+bugzilla.bzurl
+ The base URL for the Bugzilla installation.
+ Default ``http://localhost/bugzilla``.
+
+bugzilla.user
+ The username to use to log into Bugzilla via XMLRPC. Default
+ ``bugs``.
+
+bugzilla.password
+ The password for Bugzilla login.
+
+XMLRPC+email access mode uses the XMLRPC access mode configuration items,
+and also:
+
+bugzilla.bzemail
+ The Bugzilla email address.
+
+In addition, the Mercurial email settings must be configured. See the
+documentation in hgrc(5), sections ``[email]`` and ``[smtp]``.
+
+MySQL access mode configuration:
+
+bugzilla.host
+ Hostname of the MySQL server holding the Bugzilla database.
+ Default ``localhost``.
+
+bugzilla.db
+ Name of the Bugzilla database in MySQL. Default ``bugs``.
+
+bugzilla.user
+ Username to use to access MySQL server. Default ``bugs``.
+
+bugzilla.password
+ Password to use to access MySQL server.
+
+bugzilla.timeout
+ Database connection timeout (seconds). Default 5.
+
+bugzilla.bzuser
+ Fallback Bugzilla user name to record comments with, if changeset
+ committer cannot be found as a Bugzilla user.
+
+bugzilla.bzdir
+ Bugzilla install directory. Used by default notify. Default
+ ``/var/www/html/bugzilla``.
+
+bugzilla.notify
+ The command to run to get Bugzilla to send bug change notification
+ emails. Substitutes from a map with 3 keys, ``bzdir``, ``id`` (bug
+ id) and ``user`` (committer bugzilla email). Default depends on
+ version; from 2.18 it is "cd %(bzdir)s && perl -T
+ contrib/sendbugmail.pl %(id)s %(user)s".
+
+Activating the extension::
+
+ [extensions]
+ bugzilla =
+
+ [hooks]
+ # run bugzilla hook on every change pulled or pushed in here
+ incoming.bugzilla = python:hgext.bugzilla.hook
+
+Example configurations:
+
+XMLRPC example configuration. This uses the Bugzilla at
+``http://my-project.org/bugzilla``, logging in as user
+``bugmail@my-project.org`` with password ``plugh``. It is used with a
+collection of Mercurial repositories in ``/var/local/hg/repos/``,
+with a web interface at ``http://my-project.org/hg``. ::
+
+ [bugzilla]
+ bzurl=http://my-project.org/bugzilla
+ user=bugmail@my-project.org
+ password=plugh
+ version=xmlrpc
+ template=Changeset {node|short} in {root|basename}.
+ {hgweb}/{webroot}/rev/{node|short}\\n
+ {desc}\\n
+ strip=5
+
+ [web]
+ baseurl=http://my-project.org/hg
+
+XMLRPC+email example configuration. This uses the Bugzilla at
+``http://my-project.org/bugzilla``, logging in as user
+``bugmail@my-project.org`` with password ``plugh``. It is used with a
+collection of Mercurial repositories in ``/var/local/hg/repos/``,
+with a web interface at ``http://my-project.org/hg``. Bug comments
+are sent to the Bugzilla email address
+``bugzilla@my-project.org``. ::
+
+ [bugzilla]
+ bzurl=http://my-project.org/bugzilla
+ user=bugmail@my-project.org
+ password=plugh
+ version=xmlrpc
+ bzemail=bugzilla@my-project.org
+ template=Changeset {node|short} in {root|basename}.
+ {hgweb}/{webroot}/rev/{node|short}\\n
+ {desc}\\n
+ strip=5
+
+ [web]
+ baseurl=http://my-project.org/hg
+
+ [usermap]
+ user@emaildomain.com=user.name@bugzilladomain.com
+
+MySQL example configuration. This has a local Bugzilla 3.2 installation
+in ``/opt/bugzilla-3.2``. The MySQL database is on ``localhost``,
+the Bugzilla database name is ``bugs`` and MySQL is
+accessed with MySQL username ``bugs`` password ``XYZZY``. It is used
+with a collection of Mercurial repositories in ``/var/local/hg/repos/``,
+with a web interface at ``http://my-project.org/hg``. ::
+
+ [bugzilla]
+ host=localhost
+ password=XYZZY
+ version=3.0
+ bzuser=unknown@domain.com
+ bzdir=/opt/bugzilla-3.2
+ template=Changeset {node|short} in {root|basename}.
+ {hgweb}/{webroot}/rev/{node|short}\\n
+ {desc}\\n
+ strip=5
+
+ [web]
+ baseurl=http://my-project.org/hg
+
+ [usermap]
+ user@emaildomain.com=user.name@bugzilladomain.com
+
+All the above add a comment to the Bugzilla bug record of the form::
+
+ Changeset 3b16791d6642 in repository-name.
+ http://my-project.org/hg/repository-name/rev/3b16791d6642
+
+ Changeset commit comment. Bug 1234.
+'''
+
+from mercurial.i18n import _
+from mercurial.node import short
+from mercurial import cmdutil, mail, templater, util
+import re, time, urlparse, xmlrpclib
+
+testedwith = 'internal'
+
+class bzaccess(object):
+ '''Base class for access to Bugzilla.'''
+
+ def __init__(self, ui):
+ self.ui = ui
+ usermap = self.ui.config('bugzilla', 'usermap')
+ if usermap:
+ self.ui.readconfig(usermap, sections=['usermap'])
+
+ def map_committer(self, user):
+ '''map name of committer to Bugzilla user name.'''
+ for committer, bzuser in self.ui.configitems('usermap'):
+ if committer.lower() == user.lower():
+ return bzuser
+ return user
+
+ # Methods to be implemented by access classes.
+ #
+ # 'bugs' is a dict keyed on bug id, where values are a dict holding
+ # updates to bug state. Recognised dict keys are:
+ #
+ # 'hours': Value, float containing work hours to be updated.
+ # 'fix': If key present, bug is to be marked fixed. Value ignored.
+
+ def filter_real_bug_ids(self, bugs):
+ '''remove bug IDs that do not exist in Bugzilla from bugs.'''
+ pass
+
+ def filter_cset_known_bug_ids(self, node, bugs):
+ '''remove bug IDs where node occurs in comment text from bugs.'''
+ pass
+
+ def updatebug(self, bugid, newstate, text, committer):
+ '''update the specified bug. Add comment text and set new states.
+
+ If possible add the comment as being from the committer of
+ the changeset. Otherwise use the default Bugzilla user.
+ '''
+ pass
+
+ def notify(self, bugs, committer):
+ '''Force sending of Bugzilla notification emails.
+
+ Only required if the access method does not trigger notification
+ emails automatically.
+ '''
+ pass
+
+# Bugzilla via direct access to MySQL database.
+class bzmysql(bzaccess):
+ '''Support for direct MySQL access to Bugzilla.
+
+ The earliest Bugzilla version this is tested with is version 2.16.
+
+ If your Bugzilla is version 3.4 or above, you are strongly
+ recommended to use the XMLRPC access method instead.
+ '''
+
+ @staticmethod
+ def sql_buglist(ids):
+ '''return SQL-friendly list of bug ids'''
+ return '(' + ','.join(map(str, ids)) + ')'
+
+ _MySQLdb = None
+
+ def __init__(self, ui):
+ try:
+ import MySQLdb as mysql
+ bzmysql._MySQLdb = mysql
+ except ImportError, err:
+ raise util.Abort(_('python mysql support not available: %s') % err)
+
+ bzaccess.__init__(self, ui)
+
+ host = self.ui.config('bugzilla', 'host', 'localhost')
+ user = self.ui.config('bugzilla', 'user', 'bugs')
+ passwd = self.ui.config('bugzilla', 'password')
+ db = self.ui.config('bugzilla', 'db', 'bugs')
+ timeout = int(self.ui.config('bugzilla', 'timeout', 5))
+ self.ui.note(_('connecting to %s:%s as %s, password %s\n') %
+ (host, db, user, '*' * len(passwd)))
+ self.conn = bzmysql._MySQLdb.connect(host=host,
+ user=user, passwd=passwd,
+ db=db,
+ connect_timeout=timeout)
+ self.cursor = self.conn.cursor()
+ self.longdesc_id = self.get_longdesc_id()
+ self.user_ids = {}
+ self.default_notify = "cd %(bzdir)s && ./processmail %(id)s %(user)s"
+
+ def run(self, *args, **kwargs):
+ '''run a query.'''
+ self.ui.note(_('query: %s %s\n') % (args, kwargs))
+ try:
+ self.cursor.execute(*args, **kwargs)
+ except bzmysql._MySQLdb.MySQLError:
+ self.ui.note(_('failed query: %s %s\n') % (args, kwargs))
+ raise
+
+ def get_longdesc_id(self):
+ '''get identity of longdesc field'''
+ self.run('select fieldid from fielddefs where name = "longdesc"')
+ ids = self.cursor.fetchall()
+ if len(ids) != 1:
+ raise util.Abort(_('unknown database schema'))
+ return ids[0][0]
+
+ def filter_real_bug_ids(self, bugs):
+ '''filter not-existing bugs from set.'''
+ self.run('select bug_id from bugs where bug_id in %s' %
+ bzmysql.sql_buglist(bugs.keys()))
+ existing = [id for (id,) in self.cursor.fetchall()]
+ for id in bugs.keys():
+ if id not in existing:
+ self.ui.status(_('bug %d does not exist\n') % id)
+ del bugs[id]
+
+ def filter_cset_known_bug_ids(self, node, bugs):
+ '''filter bug ids that already refer to this changeset from set.'''
+ self.run('''select bug_id from longdescs where
+ bug_id in %s and thetext like "%%%s%%"''' %
+ (bzmysql.sql_buglist(bugs.keys()), short(node)))
+ for (id,) in self.cursor.fetchall():
+ self.ui.status(_('bug %d already knows about changeset %s\n') %
+ (id, short(node)))
+ del bugs[id]
+
+ def notify(self, bugs, committer):
+ '''tell bugzilla to send mail.'''
+ self.ui.status(_('telling bugzilla to send mail:\n'))
+ (user, userid) = self.get_bugzilla_user(committer)
+ for id in bugs.keys():
+ self.ui.status(_(' bug %s\n') % id)
+ cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
+ bzdir = self.ui.config('bugzilla', 'bzdir',
+ '/var/www/html/bugzilla')
+ try:
+ # Backwards-compatible with old notify string, which
+ # took one string. This will throw with a new format
+ # string.
+ cmd = cmdfmt % id
+ except TypeError:
+ cmd = cmdfmt % {'bzdir': bzdir, 'id': id, 'user': user}
+ self.ui.note(_('running notify command %s\n') % cmd)
+ fp = util.popen('(%s) 2>&1' % cmd)
+ out = fp.read()
+ ret = fp.close()
+ if ret:
+ self.ui.warn(out)
+ raise util.Abort(_('bugzilla notify command %s') %
+ util.explainexit(ret)[0])
+ self.ui.status(_('done\n'))
+
+ def get_user_id(self, user):
+ '''look up numeric bugzilla user id.'''
+ try:
+ return self.user_ids[user]
+ except KeyError:
+ try:
+ userid = int(user)
+ except ValueError:
+ self.ui.note(_('looking up user %s\n') % user)
+ self.run('''select userid from profiles
+ where login_name like %s''', user)
+ all = self.cursor.fetchall()
+ if len(all) != 1:
+ raise KeyError(user)
+ userid = int(all[0][0])
+ self.user_ids[user] = userid
+ return userid
+
+ def get_bugzilla_user(self, committer):
+ '''See if committer is a registered bugzilla user. Return
+ bugzilla username and userid if so. If not, return default
+ bugzilla username and userid.'''
+ user = self.map_committer(committer)
+ try:
+ userid = self.get_user_id(user)
+ except KeyError:
+ try:
+ defaultuser = self.ui.config('bugzilla', 'bzuser')
+ if not defaultuser:
+ raise util.Abort(_('cannot find bugzilla user id for %s') %
+ user)
+ userid = self.get_user_id(defaultuser)
+ user = defaultuser
+ except KeyError:
+ raise util.Abort(_('cannot find bugzilla user id for %s or %s')
+ % (user, defaultuser))
+ return (user, userid)
+
+ def updatebug(self, bugid, newstate, text, committer):
+ '''update bug state with comment text.
+
+ Try adding comment as committer of changeset, otherwise as
+ default bugzilla user.'''
+ if len(newstate) > 0:
+ self.ui.warn(_("Bugzilla/MySQL cannot update bug state\n"))
+
+ (user, userid) = self.get_bugzilla_user(committer)
+ now = time.strftime('%Y-%m-%d %H:%M:%S')
+ self.run('''insert into longdescs
+ (bug_id, who, bug_when, thetext)
+ values (%s, %s, %s, %s)''',
+ (bugid, userid, now, text))
+ self.run('''insert into bugs_activity (bug_id, who, bug_when, fieldid)
+ values (%s, %s, %s, %s)''',
+ (bugid, userid, now, self.longdesc_id))
+ self.conn.commit()
+
+class bzmysql_2_18(bzmysql):
+ '''support for bugzilla 2.18 series.'''
+
+ def __init__(self, ui):
+ bzmysql.__init__(self, ui)
+ self.default_notify = \
+ "cd %(bzdir)s && perl -T contrib/sendbugmail.pl %(id)s %(user)s"
+
+class bzmysql_3_0(bzmysql_2_18):
+ '''support for bugzilla 3.0 series.'''
+
+ def __init__(self, ui):
+ bzmysql_2_18.__init__(self, ui)
+
+ def get_longdesc_id(self):
+ '''get identity of longdesc field'''
+ self.run('select id from fielddefs where name = "longdesc"')
+ ids = self.cursor.fetchall()
+ if len(ids) != 1:
+ raise util.Abort(_('unknown database schema'))
+ return ids[0][0]
+
+# Bugzilla via XMLRPC interface.
+
+class cookietransportrequest(object):
+ """A Transport request method that retains cookies over its lifetime.
+
+ The regular xmlrpclib transports ignore cookies. Which causes
+ a bit of a problem when you need a cookie-based login, as with
+ the Bugzilla XMLRPC interface.
+
+ So this is a helper for defining a Transport which looks for
+ cookies being set in responses and saves them to add to all future
+ requests.
+ """
+
+ # Inspiration drawn from
+ # http://blog.godson.in/2010/09/how-to-make-python-xmlrpclib-client.html
+ # http://www.itkovian.net/base/transport-class-for-pythons-xml-rpc-lib/
+
+ cookies = []
+ def send_cookies(self, connection):
+ if self.cookies:
+ for cookie in self.cookies:
+ connection.putheader("Cookie", cookie)
+
+ def request(self, host, handler, request_body, verbose=0):
+ self.verbose = verbose
+ self.accept_gzip_encoding = False
+
+ # issue XML-RPC request
+ h = self.make_connection(host)
+ if verbose:
+ h.set_debuglevel(1)
+
+ self.send_request(h, handler, request_body)
+ self.send_host(h, host)
+ self.send_cookies(h)
+ self.send_user_agent(h)
+ self.send_content(h, request_body)
+
+ # Deal with differences between Python 2.4-2.6 and 2.7.
+        # In the former h is an HTTP(S). In the latter it's an
+        # HTTP(S)Connection. Luckily, the 2.4-2.6 implementation of
+ # HTTP(S) has an underlying HTTP(S)Connection, so extract
+ # that and use it.
+ try:
+ response = h.getresponse()
+ except AttributeError:
+ response = h._conn.getresponse()
+
+ # Add any cookie definitions to our list.
+ for header in response.msg.getallmatchingheaders("Set-Cookie"):
+ val = header.split(": ", 1)[1]
+ cookie = val.split(";", 1)[0]
+ self.cookies.append(cookie)
+
+ if response.status != 200:
+ raise xmlrpclib.ProtocolError(host + handler, response.status,
+ response.reason, response.msg.headers)
+
+ payload = response.read()
+ parser, unmarshaller = self.getparser()
+ parser.feed(payload)
+ parser.close()
+
+ return unmarshaller.close()
+
+# The explicit calls to the underlying xmlrpclib __init__() methods are
+# necessary. The xmlrpclib.Transport classes are old-style classes, and
+# it turns out their __init__() doesn't get called when doing multiple
+# inheritance with a new-style class.
+class cookietransport(cookietransportrequest, xmlrpclib.Transport):
+ def __init__(self, use_datetime=0):
+ if util.safehasattr(xmlrpclib.Transport, "__init__"):
+ xmlrpclib.Transport.__init__(self, use_datetime)
+
+class cookiesafetransport(cookietransportrequest, xmlrpclib.SafeTransport):
+ def __init__(self, use_datetime=0):
+ if util.safehasattr(xmlrpclib.Transport, "__init__"):
+ xmlrpclib.SafeTransport.__init__(self, use_datetime)
+
+class bzxmlrpc(bzaccess):
+ """Support for access to Bugzilla via the Bugzilla XMLRPC API.
+
+    Requires Bugzilla version 3.4 or later.
+ """
+
+ def __init__(self, ui):
+ bzaccess.__init__(self, ui)
+
+ bzweb = self.ui.config('bugzilla', 'bzurl',
+ 'http://localhost/bugzilla/')
+ bzweb = bzweb.rstrip("/") + "/xmlrpc.cgi"
+
+ user = self.ui.config('bugzilla', 'user', 'bugs')
+ passwd = self.ui.config('bugzilla', 'password')
+
+ self.fixstatus = self.ui.config('bugzilla', 'fixstatus', 'RESOLVED')
+ self.fixresolution = self.ui.config('bugzilla', 'fixresolution',
+ 'FIXED')
+
+ self.bzproxy = xmlrpclib.ServerProxy(bzweb, self.transport(bzweb))
+ ver = self.bzproxy.Bugzilla.version()['version'].split('.')
+ self.bzvermajor = int(ver[0])
+ self.bzverminor = int(ver[1])
+ self.bzproxy.User.login(dict(login=user, password=passwd))
+
+ def transport(self, uri):
+ if urlparse.urlparse(uri, "http")[0] == "https":
+ return cookiesafetransport()
+ else:
+ return cookietransport()
+
+ def get_bug_comments(self, id):
+ """Return a string with all comment text for a bug."""
+ c = self.bzproxy.Bug.comments(dict(ids=[id], include_fields=['text']))
+ return ''.join([t['text'] for t in c['bugs'][str(id)]['comments']])
+
+ def filter_real_bug_ids(self, bugs):
+ probe = self.bzproxy.Bug.get(dict(ids=sorted(bugs.keys()),
+ include_fields=[],
+ permissive=True))
+ for badbug in probe['faults']:
+ id = badbug['id']
+ self.ui.status(_('bug %d does not exist\n') % id)
+ del bugs[id]
+
+ def filter_cset_known_bug_ids(self, node, bugs):
+ for id in sorted(bugs.keys()):
+ if self.get_bug_comments(id).find(short(node)) != -1:
+ self.ui.status(_('bug %d already knows about changeset %s\n') %
+ (id, short(node)))
+ del bugs[id]
+
+ def updatebug(self, bugid, newstate, text, committer):
+ args = {}
+ if 'hours' in newstate:
+ args['work_time'] = newstate['hours']
+
+ if self.bzvermajor >= 4:
+ args['ids'] = [bugid]
+ args['comment'] = {'body' : text}
+ if 'fix' in newstate:
+ args['status'] = self.fixstatus
+ args['resolution'] = self.fixresolution
+ self.bzproxy.Bug.update(args)
+ else:
+ if 'fix' in newstate:
+ self.ui.warn(_("Bugzilla/XMLRPC needs Bugzilla 4.0 or later "
+ "to mark bugs fixed\n"))
+ args['id'] = bugid
+ args['comment'] = text
+ self.bzproxy.Bug.add_comment(args)
+
+class bzxmlrpcemail(bzxmlrpc):
+ """Read data from Bugzilla via XMLRPC, send updates via email.
+
+ Advantages of sending updates via email:
+ 1. Comments can be added as any user, not just logged in user.
+ 2. Bug statuses or other fields not accessible via XMLRPC can
+ potentially be updated.
+
+ There is no XMLRPC function to change bug status before Bugzilla
+ 4.0, so bugs cannot be marked fixed via XMLRPC before Bugzilla 4.0.
+ But bugs can be marked fixed via email from 3.4 onwards.
+ """
+
+ # The email interface changes subtly between 3.4 and 3.6. In 3.4,
+ # in-email fields are specified as '@<fieldname> = <value>'. In
+ # 3.6 this becomes '@<fieldname> <value>'. And fieldname @bug_id
+ # in 3.4 becomes @id in 3.6. 3.6 and 4.0 both maintain backwards
+ # compatibility, but rather than rely on this use the new format for
+ # 4.0 onwards.
+
+ def __init__(self, ui):
+ bzxmlrpc.__init__(self, ui)
+
+ self.bzemail = self.ui.config('bugzilla', 'bzemail')
+ if not self.bzemail:
+ raise util.Abort(_("configuration 'bzemail' missing"))
+ mail.validateconfig(self.ui)
+
+ def makecommandline(self, fieldname, value):
+ if self.bzvermajor >= 4:
+ return "@%s %s" % (fieldname, str(value))
+ else:
+ if fieldname == "id":
+ fieldname = "bug_id"
+ return "@%s = %s" % (fieldname, str(value))
+
+ def send_bug_modify_email(self, bugid, commands, comment, committer):
+ '''send modification message to Bugzilla bug via email.
+
+ The message format is documented in the Bugzilla email_in.pl
+ specification. commands is a list of command lines, comment is the
+ comment text.
+
+ To stop users from crafting commit comments with
+ Bugzilla commands, specify the bug ID via the message body, rather
+ than the subject line, and leave a blank line after it.
+ '''
+ user = self.map_committer(committer)
+ matches = self.bzproxy.User.get(dict(match=[user]))
+ if not matches['users']:
+ user = self.ui.config('bugzilla', 'user', 'bugs')
+ matches = self.bzproxy.User.get(dict(match=[user]))
+ if not matches['users']:
+ raise util.Abort(_("default bugzilla user %s email not found") %
+ user)
+ user = matches['users'][0]['email']
+ commands.append(self.makecommandline("id", bugid))
+
+ text = "\n".join(commands) + "\n\n" + comment
+
+ _charsets = mail._charsets(self.ui)
+ user = mail.addressencode(self.ui, user, _charsets)
+ bzemail = mail.addressencode(self.ui, self.bzemail, _charsets)
+ msg = mail.mimeencode(self.ui, text, _charsets)
+ msg['From'] = user
+ msg['To'] = bzemail
+ msg['Subject'] = mail.headencode(self.ui, "Bug modification", _charsets)
+ sendmail = mail.connect(self.ui)
+ sendmail(user, bzemail, msg.as_string())
+
+ def updatebug(self, bugid, newstate, text, committer):
+ cmds = []
+ if 'hours' in newstate:
+ cmds.append(self.makecommandline("work_time", newstate['hours']))
+ if 'fix' in newstate:
+ cmds.append(self.makecommandline("bug_status", self.fixstatus))
+ cmds.append(self.makecommandline("resolution", self.fixresolution))
+ self.send_bug_modify_email(bugid, cmds, text, committer)
+
+class bugzilla(object):
+ # supported versions of bugzilla. different versions have
+ # different schemas.
+ _versions = {
+ '2.16': bzmysql,
+ '2.18': bzmysql_2_18,
+ '3.0': bzmysql_3_0,
+ 'xmlrpc': bzxmlrpc,
+ 'xmlrpc+email': bzxmlrpcemail
+ }
+
+ _default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
+ r'(?P<ids>(?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
+ r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')
+
+ _default_fix_re = (r'fix(?:es)?\s*(?:bugs?\s*)?,?\s*'
+ r'(?:nos?\.?|num(?:ber)?s?)?\s*'
+ r'(?P<ids>(?:#?\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
+ r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')
+
+ _bz = None
+
+ def __init__(self, ui, repo):
+ self.ui = ui
+ self.repo = repo
+
+ def bz(self):
+ '''return object that knows how to talk to bugzilla version in
+ use.'''
+
+ if bugzilla._bz is None:
+ bzversion = self.ui.config('bugzilla', 'version')
+ try:
+ bzclass = bugzilla._versions[bzversion]
+ except KeyError:
+ raise util.Abort(_('bugzilla version %s not supported') %
+ bzversion)
+ bugzilla._bz = bzclass(self.ui)
+ return bugzilla._bz
+
+ def __getattr__(self, key):
+ return getattr(self.bz(), key)
+
+ _bug_re = None
+ _fix_re = None
+ _split_re = None
+
+ def find_bugs(self, ctx):
+ '''return bugs dictionary created from commit comment.
+
+ Extract bug info from changeset comments. Filter out any that are
+ not known to Bugzilla, and any that already have a reference to
+ the given changeset in their comments.
+ '''
+ if bugzilla._bug_re is None:
+ bugzilla._bug_re = re.compile(
+ self.ui.config('bugzilla', 'regexp',
+ bugzilla._default_bug_re), re.IGNORECASE)
+ bugzilla._fix_re = re.compile(
+ self.ui.config('bugzilla', 'fixregexp',
+ bugzilla._default_fix_re), re.IGNORECASE)
+ bugzilla._split_re = re.compile(r'\D+')
+ start = 0
+ hours = 0.0
+ bugs = {}
+ bugmatch = bugzilla._bug_re.search(ctx.description(), start)
+ fixmatch = bugzilla._fix_re.search(ctx.description(), start)
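+        # Walk the description, always consuming whichever of the two
+        # pending matches starts first; a fix match additionally tags
+        # the referenced bugs to be marked fixed.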
+ while True:
+ bugattribs = {}
+ if not bugmatch and not fixmatch:
+ break
+ if not bugmatch:
+ m = fixmatch
+ elif not fixmatch:
+ m = bugmatch
+ else:
+ if bugmatch.start() < fixmatch.start():
+ m = bugmatch
+ else:
+ m = fixmatch
+ start = m.end()
+ if m is bugmatch:
+ bugmatch = bugzilla._bug_re.search(ctx.description(), start)
+ if 'fix' in bugattribs:
+ del bugattribs['fix']
+ else:
+ fixmatch = bugzilla._fix_re.search(ctx.description(), start)
+ bugattribs['fix'] = None
+
+ try:
+ ids = m.group('ids')
+ except IndexError:
+ ids = m.group(1)
+ try:
+ hours = float(m.group('hours'))
+ bugattribs['hours'] = hours
+ except IndexError:
+ pass
+ except TypeError:
+ pass
+ except ValueError:
+ self.ui.status(_("%s: invalid hours\n") % m.group('hours'))
+
+ for id in bugzilla._split_re.split(ids):
+ if not id:
+ continue
+ bugs[int(id)] = bugattribs
+ if bugs:
+ self.filter_real_bug_ids(bugs)
+ if bugs:
+ self.filter_cset_known_bug_ids(ctx.node(), bugs)
+ return bugs
+
+ def update(self, bugid, newstate, ctx):
+ '''update bugzilla bug with reference to changeset.'''
+
+ def webroot(root):
+ '''strip leading prefix of repo root and turn into
+ url-safe path.'''
+ count = int(self.ui.config('bugzilla', 'strip', 0))
+ root = util.pconvert(root)
+ while count > 0:
+ c = root.find('/')
+ if c == -1:
+ break
+ root = root[c + 1:]
+ count -= 1
+ return root
+
+ mapfile = self.ui.config('bugzilla', 'style')
+ tmpl = self.ui.config('bugzilla', 'template')
+ t = cmdutil.changeset_templater(self.ui, self.repo,
+ False, None, mapfile, False)
+ if not mapfile and not tmpl:
+ tmpl = _('changeset {node|short} in repo {root} refers '
+ 'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
+ if tmpl:
+ tmpl = templater.parsestring(tmpl, quoted=False)
+ t.use_template(tmpl)
+ self.ui.pushbuffer()
+ t.show(ctx, changes=ctx.changeset(),
+ bug=str(bugid),
+ hgweb=self.ui.config('web', 'baseurl'),
+ root=self.repo.root,
+ webroot=webroot(self.repo.root))
+ data = self.ui.popbuffer()
+ self.updatebug(bugid, newstate, data, util.email(ctx.user()))
+
+def hook(ui, repo, hooktype, node=None, **kwargs):
+ '''add comment to bugzilla for each changeset that refers to a
+    bugzilla bug id. Only add a comment once per bug, so the same change
+    seen multiple times does not fill the bug with duplicate data.'''
+ if node is None:
+ raise util.Abort(_('hook type %s does not pass a changeset id') %
+ hooktype)
+ try:
+ bz = bugzilla(ui, repo)
+ ctx = repo[node]
+ bugs = bz.find_bugs(ctx)
+ if bugs:
+ for bug in bugs:
+ bz.update(bug, bugs[bug], ctx)
+ bz.notify(bugs, util.email(ctx.user()))
+ except Exception, e:
+ raise util.Abort(_('Bugzilla error: %s') % e)
diff --git a/hgext/children.py b/hgext/children.py
new file mode 100644
index 0000000..7b477aa
--- /dev/null
+++ b/hgext/children.py
@@ -0,0 +1,50 @@
+# Mercurial extension to provide the 'hg children' command
+#
+# Copyright 2007 by Intevation GmbH <intevation@intevation.de>
+#
+# Author(s):
+# Thomas Arendsen Hein <thomas@intevation.de>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''command to display child changesets (DEPRECATED)
+
+This extension is deprecated. You should use :hg:`log -r
+"children(REV)"` instead.
+'''
+
+from mercurial import cmdutil
+from mercurial.commands import templateopts
+from mercurial.i18n import _
+
+testedwith = 'internal'
+
+def children(ui, repo, file_=None, **opts):
+ """show the children of the given or working directory revision
+
+ Print the children of the working directory's revisions. If a
+ revision is given via -r/--rev, the children of that revision will
+    be printed. If a file argument is given, the revision in which the
+ file was last changed (after the working directory revision or the
+ argument to --rev if given) is printed.
+ """
+ rev = opts.get('rev')
+ if file_:
+ ctx = repo.filectx(file_, changeid=rev)
+ else:
+ ctx = repo[rev]
+
+ displayer = cmdutil.show_changeset(ui, repo, opts)
+ for cctx in ctx.children():
+ displayer.show(cctx)
+ displayer.close()
+
+cmdtable = {
+ "children":
+ (children,
+ [('r', 'rev', '',
+ _('show children of the specified revision'), _('REV')),
+ ] + templateopts,
+ _('hg children [-r REV] [FILE]')),
+}
diff --git a/hgext/churn.py b/hgext/churn.py
new file mode 100644
index 0000000..29796f0
--- /dev/null
+++ b/hgext/churn.py
@@ -0,0 +1,199 @@
+# churn.py - create a graph of revisions count grouped by template
+#
+# Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
+# Copyright 2008 Alexander Solovyov <piranha@piranha.org.ua>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''command to display statistics about repository history'''
+
+from mercurial.i18n import _
+from mercurial import patch, cmdutil, scmutil, util, templater, commands
+import os
+import time, datetime
+
+testedwith = 'internal'
+
+def maketemplater(ui, repo, tmpl):
+ tmpl = templater.parsestring(tmpl, quoted=False)
+ try:
+ t = cmdutil.changeset_templater(ui, repo, False, None, None, False)
+ except SyntaxError, inst:
+ raise util.Abort(inst.args[0])
+ t.use_template(tmpl)
+ return t
+
+def changedlines(ui, repo, ctx1, ctx2, fns):
+ added, removed = 0, 0
+ fmatch = scmutil.matchfiles(repo, fns)
+ diff = ''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
+ for l in diff.split('\n'):
+ if l.startswith("+") and not l.startswith("+++ "):
+ added += 1
+ elif l.startswith("-") and not l.startswith("--- "):
+ removed += 1
+ return (added, removed)
+
+def countrate(ui, repo, amap, *pats, **opts):
+    """Calculate stats grouped by the key produced for each changeset."""
+ if opts.get('dateformat'):
+ def getkey(ctx):
+ t, tz = ctx.date()
+ date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
+ return date.strftime(opts['dateformat'])
+ else:
+ tmpl = opts.get('template', '{author|email}')
+ tmpl = maketemplater(ui, repo, tmpl)
+ def getkey(ctx):
+ ui.pushbuffer()
+ tmpl.show(ctx)
+ return ui.popbuffer()
+
+ state = {'count': 0}
+ rate = {}
+ df = False
+ if opts.get('date'):
+ df = util.matchdate(opts['date'])
+
+ m = scmutil.match(repo[None], pats, opts)
+ def prep(ctx, fns):
+ rev = ctx.rev()
+ if df and not df(ctx.date()[0]): # doesn't match date format
+ return
+
+ key = getkey(ctx).strip()
+ key = amap.get(key, key) # alias remap
+ if opts.get('changesets'):
+ rate[key] = (rate.get(key, (0,))[0] + 1, 0)
+ else:
+ parents = ctx.parents()
+ if len(parents) > 1:
+ ui.note(_('revision %d is a merge, ignoring...\n') % (rev,))
+ return
+
+ ctx1 = parents[0]
+ lines = changedlines(ui, repo, ctx1, ctx, fns)
+ rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]
+
+ state['count'] += 1
+ ui.progress(_('analyzing'), state['count'], total=len(repo))
+
+ for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
+ continue
+
+ ui.progress(_('analyzing'), None)
+
+ return rate
+
+
+def churn(ui, repo, *pats, **opts):
+ '''histogram of changes to the repository
+
+ This command will display a histogram representing the number
+ of changed lines or revisions, grouped according to the given
+ template. The default template will group changes by author.
+ The --dateformat option may be used to group the results by
+ date instead.
+
+ Statistics are based on the number of changed lines, or
+ alternatively the number of matching revisions if the
+ --changesets option is specified.
+
+ Examples::
+
+ # display count of changed lines for every committer
+ hg churn -t '{author|email}'
+
+ # display daily activity graph
+ hg churn -f '%H' -s -c
+
+ # display activity of developers by month
+ hg churn -f '%Y-%m' -s -c
+
+ # display count of lines changed in every year
+ hg churn -f '%Y' -s
+
+ It is possible to map alternate email addresses to a main address
+ by providing a file using the following format::
+
+ <alias email> = <actual email>
+
+ Such a file may be specified with the --aliases option, otherwise
+ a .hgchurn file will be looked for in the working directory root.
+ '''
+ def pad(s, l):
+ return (s + " " * l)[:l]
+
+ amap = {}
+ aliases = opts.get('aliases')
+ if not aliases and os.path.exists(repo.wjoin('.hgchurn')):
+ aliases = repo.wjoin('.hgchurn')
+ if aliases:
+ for l in open(aliases, "r"):
+ try:
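+                # split on '=' if present, otherwise on any whitespace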
+ alias, actual = l.split('=' in l and '=' or None, 1)
+ amap[alias.strip()] = actual.strip()
+ except ValueError:
+ l = l.strip()
+ if l:
+ ui.warn(_("skipping malformed alias: %s\n") % l)
+ continue
+
+ rate = countrate(ui, repo, amap, *pats, **opts).items()
+ if not rate:
+ return
+
+ sortkey = ((not opts.get('sort')) and (lambda x: -sum(x[1])) or None)
+ rate.sort(key=sortkey)
+
+ # Be careful not to have a zero maxcount (issue833)
+ maxcount = float(max(sum(v) for k, v in rate)) or 1.0
+ maxname = max(len(k) for k, v in rate)
+
+ ttywidth = ui.termwidth()
+ ui.debug("assuming %i character terminal\n" % ttywidth)
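+    # reserve room for padding between the name, count and graph columns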
+ width = ttywidth - maxname - 2 - 2 - 2
+
+ if opts.get('diffstat'):
+ width -= 15
+ def format(name, diffstat):
+ added, removed = diffstat
+ return "%s %15s %s%s\n" % (pad(name, maxname),
+ '+%d/-%d' % (added, removed),
+ ui.label('+' * charnum(added),
+ 'diffstat.inserted'),
+ ui.label('-' * charnum(removed),
+ 'diffstat.deleted'))
+ else:
+ width -= 6
+ def format(name, count):
+ return "%s %6d %s\n" % (pad(name, maxname), sum(count),
+ '*' * charnum(sum(count)))
+
+ def charnum(count):
+ return int(round(count * width / maxcount))
+
+ for name, count in rate:
+ ui.write(format(name, count))
+
+
+cmdtable = {
+ "churn":
+ (churn,
+ [('r', 'rev', [],
+ _('count rate for the specified revision or range'), _('REV')),
+ ('d', 'date', '',
+ _('count rate for revisions matching date spec'), _('DATE')),
+ ('t', 'template', '{author|email}',
+ _('template to group changesets'), _('TEMPLATE')),
+ ('f', 'dateformat', '',
+ _('strftime-compatible format for grouping by date'), _('FORMAT')),
+ ('c', 'changesets', False, _('count rate by number of changesets')),
+ ('s', 'sort', False, _('sort by key (default: sort by count)')),
+ ('', 'diffstat', False, _('display added/removed lines separately')),
+ ('', 'aliases', '',
+ _('file with email aliases'), _('FILE')),
+ ] + commands.walkopts,
+ _("hg churn [-d DATE] [-r REV] [--aliases FILE] [FILE]")),
+}
diff --git a/hgext/color.py b/hgext/color.py
new file mode 100644
index 0000000..22ef360
--- /dev/null
+++ b/hgext/color.py
@@ -0,0 +1,503 @@
+# color.py color output for the status and qseries commands
+#
+# Copyright (C) 2007 Kevin Christen <kevin.christen@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''colorize output from some commands
+
+This extension modifies the status and resolve commands to add color
+to their output to reflect file status, the qseries command to add
+color to reflect patch status (applied, unapplied, missing), and to
+diff-related commands to highlight additions, removals, diff headers,
+and trailing whitespace.
+
+Other effects in addition to color, like bold and underlined text, are
+also available. By default, the terminfo database is used to find the
+terminal codes used to change color and effect. If terminfo is not
+available, then effects are rendered with the ECMA-48 SGR control
+function (aka ANSI escape codes).
+
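+For example, in ECMA-48 (ANSI) mode an effect list such as ``red bold``
+is rendered by emitting the SGR sequence ``\\033[0;31;1m`` (reset, red
+foreground, bold) before the text and ``\\033[0m`` after it to restore
+the defaults.
+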
+Default effects may be overridden from your configuration file::
+
+ [color]
+ status.modified = blue bold underline red_background
+ status.added = green bold
+ status.removed = red bold blue_background
+ status.deleted = cyan bold underline
+ status.unknown = magenta bold underline
+ status.ignored = black bold
+
+ # 'none' turns off all effects
+ status.clean = none
+ status.copied = none
+
+ qseries.applied = blue bold underline
+ qseries.unapplied = black bold
+ qseries.missing = red bold
+
+ diff.diffline = bold
+ diff.extended = cyan bold
+ diff.file_a = red bold
+ diff.file_b = green bold
+ diff.hunk = magenta
+ diff.deleted = red
+ diff.inserted = green
+ diff.changed = white
+ diff.trailingwhitespace = bold red_background
+
+ resolve.unresolved = red bold
+ resolve.resolved = green bold
+
+ bookmarks.current = green
+
+ branches.active = none
+ branches.closed = black bold
+ branches.current = green
+ branches.inactive = none
+
+ tags.normal = green
+ tags.local = black bold
+
+The available effects in terminfo mode are 'blink', 'bold', 'dim',
+'inverse', 'invisible', 'italic', 'standout', and 'underline'; in
+ECMA-48 mode, the options are 'bold', 'inverse', 'italic', and
+'underline'. How each is rendered depends on the terminal emulator.
+Some may not be available for a given terminal type, and will be
+silently ignored.
+
+Note that on some systems, terminfo mode may cause problems when using
+color with the pager extension and less -R. less with the -R option
+will only display ECMA-48 color codes, and terminfo mode may sometimes
+emit codes that less doesn't understand. You can work around this by
+either using ansi mode (or auto mode), or by using less -r (which will
+pass through all terminal control codes, not just color control
+codes).
+
+Because there are only eight standard colors, this module allows you
+to define color names for other color slots which might be available
+for your terminal type, assuming terminfo mode. For instance::
+
+ color.brightblue = 12
+ color.pink = 207
+ color.orange = 202
+
+to set 'brightblue' to color slot 12 (useful for 16 color terminals
+that have brighter colors defined in the upper eight) and, 'pink' and
+'orange' to colors in 256-color xterm's default color cube. These
+defined colors may then be used as any of the pre-defined eight,
+including appending '_background' to set the background to that color.
+
+By default, the color extension will use ANSI mode (or win32 mode on
+Windows) if it detects a terminal. To override auto mode (to enable
+terminfo mode, for example), set the following configuration option::
+
+ [color]
+ mode = terminfo
+
+Any value other than 'ansi', 'win32', 'terminfo', or 'auto' will
+disable color.
+'''
+
+import os
+
+from mercurial import commands, dispatch, extensions, ui as uimod, util
+from mercurial.i18n import _
+
+testedwith = 'internal'
+
+# start and stop parameters for effects
+_effects = {'none': 0, 'black': 30, 'red': 31, 'green': 32, 'yellow': 33,
+ 'blue': 34, 'magenta': 35, 'cyan': 36, 'white': 37, 'bold': 1,
+ 'italic': 3, 'underline': 4, 'inverse': 7,
+ 'black_background': 40, 'red_background': 41,
+ 'green_background': 42, 'yellow_background': 43,
+ 'blue_background': 44, 'purple_background': 45,
+ 'cyan_background': 46, 'white_background': 47}
+
+def _terminfosetup(ui, mode):
+ '''Initialize terminfo data and the terminal if we're in terminfo mode.'''
+
+ global _terminfo_params
+ # If we failed to load curses, we go ahead and return.
+ if not _terminfo_params:
+ return
+ # Otherwise, see what the config file says.
+ if mode not in ('auto', 'terminfo'):
+ return
+
+ _terminfo_params.update((key[6:], (False, int(val)))
+ for key, val in ui.configitems('color')
+ if key.startswith('color.'))
+
+ try:
+ curses.setupterm()
+ except curses.error, e:
+ _terminfo_params = {}
+ return
+
+ for key, (b, e) in _terminfo_params.items():
+ if not b:
+ continue
+ if not curses.tigetstr(e):
+ # Most terminals don't support dim, invis, etc, so don't be
+ # noisy and use ui.debug().
+ ui.debug("no terminfo entry for %s\n" % e)
+ del _terminfo_params[key]
+ if not curses.tigetstr('setaf') or not curses.tigetstr('setab'):
+ # Only warn about missing terminfo entries if we explicitly asked for
+ # terminfo mode.
+ if mode == "terminfo":
+ ui.warn(_("no terminfo entry for setab/setaf: reverting to "
+ "ECMA-48 color\n"))
+ _terminfo_params = {}
+
+def _modesetup(ui, opts):
+ global _terminfo_params
+
+ coloropt = opts['color']
+ auto = coloropt == 'auto'
+ always = not auto and util.parsebool(coloropt)
+ if not always and not auto:
+ return None
+
+ formatted = always or (os.environ.get('TERM') != 'dumb' and ui.formatted())
+
+ mode = ui.config('color', 'mode', 'auto')
+ realmode = mode
+ if mode == 'auto':
+ if os.name == 'nt' and 'TERM' not in os.environ:
+            # looks like a cmd.exe console, use win32 API or nothing
+ realmode = 'win32'
+ else:
+ realmode = 'ansi'
+
+ if realmode == 'win32':
+ _terminfo_params = {}
+ if not w32effects:
+ if mode == 'win32':
+ # only warn if color.mode is explicitly set to win32
+ ui.warn(_('warning: failed to set color mode to %s\n') % mode)
+ return None
+ _effects.update(w32effects)
+ elif realmode == 'ansi':
+ _terminfo_params = {}
+ elif realmode == 'terminfo':
+ _terminfosetup(ui, mode)
+ if not _terminfo_params:
+ if mode == 'terminfo':
+                ## FIXME Shouldn't we return None in this case too?
+                # only warn if color.mode is explicitly set to terminfo
+ ui.warn(_('warning: failed to set color mode to %s\n') % mode)
+ realmode = 'ansi'
+ else:
+ return None
+
+ if always or (auto and formatted):
+ return realmode
+ return None
+
+try:
+ import curses
+ # Mapping from effect name to terminfo attribute name or color number.
+ # This will also force-load the curses module.
+ _terminfo_params = {'none': (True, 'sgr0'),
+ 'standout': (True, 'smso'),
+ 'underline': (True, 'smul'),
+ 'reverse': (True, 'rev'),
+ 'inverse': (True, 'rev'),
+ 'blink': (True, 'blink'),
+ 'dim': (True, 'dim'),
+ 'bold': (True, 'bold'),
+ 'invisible': (True, 'invis'),
+ 'italic': (True, 'sitm'),
+ 'black': (False, curses.COLOR_BLACK),
+ 'red': (False, curses.COLOR_RED),
+ 'green': (False, curses.COLOR_GREEN),
+ 'yellow': (False, curses.COLOR_YELLOW),
+ 'blue': (False, curses.COLOR_BLUE),
+ 'magenta': (False, curses.COLOR_MAGENTA),
+ 'cyan': (False, curses.COLOR_CYAN),
+ 'white': (False, curses.COLOR_WHITE)}
+except ImportError:
+ _terminfo_params = False
+
+_styles = {'grep.match': 'red bold',
+ 'bookmarks.current': 'green',
+ 'branches.active': 'none',
+ 'branches.closed': 'black bold',
+ 'branches.current': 'green',
+ 'branches.inactive': 'none',
+ 'diff.changed': 'white',
+ 'diff.deleted': 'red',
+ 'diff.diffline': 'bold',
+ 'diff.extended': 'cyan bold',
+ 'diff.file_a': 'red bold',
+ 'diff.file_b': 'green bold',
+ 'diff.hunk': 'magenta',
+ 'diff.inserted': 'green',
+ 'diff.trailingwhitespace': 'bold red_background',
+ 'diffstat.deleted': 'red',
+ 'diffstat.inserted': 'green',
+ 'ui.prompt': 'yellow',
+ 'log.changeset': 'yellow',
+ 'resolve.resolved': 'green bold',
+ 'resolve.unresolved': 'red bold',
+ 'status.added': 'green bold',
+ 'status.clean': 'none',
+ 'status.copied': 'none',
+ 'status.deleted': 'cyan bold underline',
+ 'status.ignored': 'black bold',
+ 'status.modified': 'blue bold',
+ 'status.removed': 'red bold',
+ 'status.unknown': 'magenta bold underline',
+ 'tags.normal': 'green',
+ 'tags.local': 'black bold'}
+
+
+def _effect_str(effect):
+ '''Helper function for render_effects().'''
+
+ bg = False
+ if effect.endswith('_background'):
+ bg = True
+ effect = effect[:-11]
+ attr, val = _terminfo_params[effect]
+ if attr:
+ return curses.tigetstr(val)
+ elif bg:
+ return curses.tparm(curses.tigetstr('setab'), val)
+ else:
+ return curses.tparm(curses.tigetstr('setaf'), val)
+
+def render_effects(text, effects):
+ 'Wrap text in commands to turn on each effect.'
+ if not text:
+ return text
+ if not _terminfo_params:
+ start = [str(_effects[e]) for e in ['none'] + effects.split()]
+ start = '\033[' + ';'.join(start) + 'm'
+ stop = '\033[' + str(_effects['none']) + 'm'
+ else:
+ start = ''.join(_effect_str(effect)
+ for effect in ['none'] + effects.split())
+ stop = _effect_str('none')
+ return ''.join([start, text, stop])
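+
+# For example, in ECMA-48 (ansi) mode with the default effect table:
+#   render_effects('text', 'red bold') == '\033[0;31;1mtext\033[0m'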
+
+def extstyles():
+ for name, ext in extensions.extensions():
+ _styles.update(getattr(ext, 'colortable', {}))
+
+def configstyles(ui):
+ for status, cfgeffects in ui.configitems('color'):
+ if '.' not in status or status.startswith('color.'):
+ continue
+ cfgeffects = ui.configlist('color', status)
+ if cfgeffects:
+ good = []
+ for e in cfgeffects:
+ if not _terminfo_params and e in _effects:
+ good.append(e)
+ elif e in _terminfo_params or e[:-11] in _terminfo_params:
+ good.append(e)
+ else:
+ ui.warn(_("ignoring unknown color/effect %r "
+ "(configured in color.%s)\n")
+ % (e, status))
+ _styles[status] = ' '.join(good)
+
+class colorui(uimod.ui):
+ def popbuffer(self, labeled=False):
+ if labeled:
+ return ''.join(self.label(a, label) for a, label
+ in self._buffers.pop())
+ return ''.join(a for a, label in self._buffers.pop())
+
+ _colormode = 'ansi'
+ def write(self, *args, **opts):
+ label = opts.get('label', '')
+ if self._buffers:
+ self._buffers[-1].extend([(str(a), label) for a in args])
+ elif self._colormode == 'win32':
+ for a in args:
+ win32print(a, super(colorui, self).write, **opts)
+ else:
+ return super(colorui, self).write(
+ *[self.label(str(a), label) for a in args], **opts)
+
+ def write_err(self, *args, **opts):
+ label = opts.get('label', '')
+ if self._colormode == 'win32':
+ for a in args:
+ win32print(a, super(colorui, self).write_err, **opts)
+ else:
+ return super(colorui, self).write_err(
+ *[self.label(str(a), label) for a in args], **opts)
+
+ def label(self, msg, label):
+ effects = []
+ for l in label.split():
+ s = _styles.get(l, '')
+ if s:
+ effects.append(s)
+ effects = ' '.join(effects)
+ if effects:
+ return '\n'.join([render_effects(s, effects)
+ for s in msg.split('\n')])
+ return msg
+
+
+def uisetup(ui):
+ global _terminfo_params
+ if ui.plain():
+ return
+ def colorcmd(orig, ui_, opts, cmd, cmdfunc):
+ mode = _modesetup(ui_, opts)
+ if mode:
+ colorui._colormode = mode
+ if not issubclass(ui_.__class__, colorui):
+ colorui.__bases__ = (ui_.__class__,)
+ ui_.__class__ = colorui
+ extstyles()
+ configstyles(ui_)
+ return orig(ui_, opts, cmd, cmdfunc)
+ extensions.wrapfunction(dispatch, '_runcommand', colorcmd)
+
+def extsetup(ui):
+ commands.globalopts.append(
+ ('', 'color', 'auto',
+ # i18n: 'always', 'auto', and 'never' are keywords and should
+ # not be translated
+ _("when to colorize (boolean, always, auto, or never)"),
+ _('TYPE')))
+
+if os.name != 'nt':
+ w32effects = None
+else:
+ import re, ctypes
+
+ _kernel32 = ctypes.windll.kernel32
+
+ _WORD = ctypes.c_ushort
+
+ _INVALID_HANDLE_VALUE = -1
+
+ class _COORD(ctypes.Structure):
+ _fields_ = [('X', ctypes.c_short),
+ ('Y', ctypes.c_short)]
+
+ class _SMALL_RECT(ctypes.Structure):
+ _fields_ = [('Left', ctypes.c_short),
+ ('Top', ctypes.c_short),
+ ('Right', ctypes.c_short),
+ ('Bottom', ctypes.c_short)]
+
+ class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
+ _fields_ = [('dwSize', _COORD),
+ ('dwCursorPosition', _COORD),
+ ('wAttributes', _WORD),
+ ('srWindow', _SMALL_RECT),
+ ('dwMaximumWindowSize', _COORD)]
+
+ _STD_OUTPUT_HANDLE = 0xfffffff5L # (DWORD)-11
+ _STD_ERROR_HANDLE = 0xfffffff4L # (DWORD)-12
+
+ _FOREGROUND_BLUE = 0x0001
+ _FOREGROUND_GREEN = 0x0002
+ _FOREGROUND_RED = 0x0004
+ _FOREGROUND_INTENSITY = 0x0008
+
+ _BACKGROUND_BLUE = 0x0010
+ _BACKGROUND_GREEN = 0x0020
+ _BACKGROUND_RED = 0x0040
+ _BACKGROUND_INTENSITY = 0x0080
+
+ _COMMON_LVB_REVERSE_VIDEO = 0x4000
+ _COMMON_LVB_UNDERSCORE = 0x8000
+
+ # http://msdn.microsoft.com/en-us/library/ms682088%28VS.85%29.aspx
+ w32effects = {
+ 'none': -1,
+ 'black': 0,
+ 'red': _FOREGROUND_RED,
+ 'green': _FOREGROUND_GREEN,
+ 'yellow': _FOREGROUND_RED | _FOREGROUND_GREEN,
+ 'blue': _FOREGROUND_BLUE,
+ 'magenta': _FOREGROUND_BLUE | _FOREGROUND_RED,
+ 'cyan': _FOREGROUND_BLUE | _FOREGROUND_GREEN,
+ 'white': _FOREGROUND_RED | _FOREGROUND_GREEN | _FOREGROUND_BLUE,
+ 'bold': _FOREGROUND_INTENSITY,
+ 'black_background': 0x100, # unused value > 0x0f
+ 'red_background': _BACKGROUND_RED,
+ 'green_background': _BACKGROUND_GREEN,
+ 'yellow_background': _BACKGROUND_RED | _BACKGROUND_GREEN,
+ 'blue_background': _BACKGROUND_BLUE,
+ 'purple_background': _BACKGROUND_BLUE | _BACKGROUND_RED,
+ 'cyan_background': _BACKGROUND_BLUE | _BACKGROUND_GREEN,
+ 'white_background': (_BACKGROUND_RED | _BACKGROUND_GREEN |
+ _BACKGROUND_BLUE),
+ 'bold_background': _BACKGROUND_INTENSITY,
+ 'underline': _COMMON_LVB_UNDERSCORE, # double-byte charsets only
+ 'inverse': _COMMON_LVB_REVERSE_VIDEO, # double-byte charsets only
+ }
+
+ passthrough = set([_FOREGROUND_INTENSITY,
+ _BACKGROUND_INTENSITY,
+ _COMMON_LVB_UNDERSCORE,
+ _COMMON_LVB_REVERSE_VIDEO])
+
+ stdout = _kernel32.GetStdHandle(
+ _STD_OUTPUT_HANDLE) # don't close the handle returned
+ if stdout is None or stdout == _INVALID_HANDLE_VALUE:
+ w32effects = None
+ else:
+ csbi = _CONSOLE_SCREEN_BUFFER_INFO()
+ if not _kernel32.GetConsoleScreenBufferInfo(
+ stdout, ctypes.byref(csbi)):
+ # stdout may not support GetConsoleScreenBufferInfo()
+ # when called from subprocess or redirected
+ w32effects = None
+ else:
+ origattr = csbi.wAttributes
+ ansire = re.compile('\033\[([^m]*)m([^\033]*)(.*)',
+ re.MULTILINE | re.DOTALL)
+
+ def win32print(text, orig, **opts):
+ label = opts.get('label', '')
+ attr = origattr
+
+ def mapcolor(val, attr):
+ if val == -1:
+ return origattr
+ elif val in passthrough:
+ return attr | val
+ elif val > 0x0f:
+ return (val & 0x70) | (attr & 0x8f)
+ else:
+ return (val & 0x07) | (attr & 0xf8)
+
+ # determine console attributes based on labels
+ for l in label.split():
+ style = _styles.get(l, '')
+ for effect in style.split():
+ attr = mapcolor(w32effects[effect], attr)
+
+ # hack to ensure regexp finds data
+ if not text.startswith('\033['):
+ text = '\033[m' + text
+
+ # Look for ANSI-like codes embedded in text
+ m = re.match(ansire, text)
+
+ try:
+ while m:
+ for sattr in m.group(1).split(';'):
+ if sattr:
+ attr = mapcolor(int(sattr), attr)
+ _kernel32.SetConsoleTextAttribute(stdout, attr)
+ orig(m.group(2), **opts)
+ m = re.match(ansire, m.group(3))
+ finally:
+            # Explicitly reset original attributes
+ _kernel32.SetConsoleTextAttribute(stdout, origattr)
diff --git a/hgext/convert/__init__.py b/hgext/convert/__init__.py
new file mode 100644
index 0000000..e53c82c
--- /dev/null
+++ b/hgext/convert/__init__.py
@@ -0,0 +1,370 @@
+# convert.py - foreign SCM converter
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''import revisions from foreign VCS repositories into Mercurial'''
+
+import convcmd
+import cvsps
+import subversion
+from mercurial import commands, templatekw
+from mercurial.i18n import _
+
+testedwith = 'internal'
+
+# Command definitions were moved elsewhere to ease the demandload job.
+
+def convert(ui, src, dest=None, revmapfile=None, **opts):
+ """convert a foreign SCM repository to a Mercurial one.
+
+ Accepted source formats [identifiers]:
+
+ - Mercurial [hg]
+ - CVS [cvs]
+ - Darcs [darcs]
+ - git [git]
+ - Subversion [svn]
+ - Monotone [mtn]
+ - GNU Arch [gnuarch]
+ - Bazaar [bzr]
+ - Perforce [p4]
+
+ Accepted destination formats [identifiers]:
+
+ - Mercurial [hg]
+ - Subversion [svn] (history on branches is not preserved)
+
+ If no revision is given, all revisions will be converted.
+ Otherwise, convert will only import up to the named revision
+ (given in a format understood by the source).
+
+ If no destination directory name is specified, it defaults to the
+ basename of the source with ``-hg`` appended. If the destination
+ repository doesn't exist, it will be created.
+
+ By default, all sources except Mercurial will use --branchsort.
+ Mercurial uses --sourcesort to preserve original revision numbers
+ order. Sort modes have the following effects:
+
+ --branchsort convert from parent to child revision when possible,
+ which means branches are usually converted one after
+ the other. It generates more compact repositories.
+
+ --datesort sort revisions by date. Converted repositories have
+ good-looking changelogs but are often an order of
+ magnitude larger than the same ones generated by
+ --branchsort.
+
+ --sourcesort try to preserve source revisions order, only
+ supported by Mercurial sources.
+
+ If ``REVMAP`` isn't given, it will be put in a default location
+ (``<dest>/.hg/shamap`` by default). The ``REVMAP`` is a simple
+ text file that maps each source commit ID to the destination ID
+ for that revision, like so::
+
+ <source ID> <destination ID>
+
+ If the file doesn't exist, it's automatically created. It's
+ updated on each commit copied, so :hg:`convert` can be interrupted
+ and can be run repeatedly to copy new commits.
+
+ The authormap is a simple text file that maps each source commit
+ author to a destination commit author. It is handy for source SCMs
+ that use unix logins to identify authors (eg: CVS). One line per
+ author mapping and the line format is::
+
+ source author = destination author
+
+ Empty lines and lines starting with a ``#`` are ignored.
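+
+    For example (fictitious identities)::
+
+      jdoe = John Doe <jdoe@example.com>
+      # second CVS account of the same person
+      johnd = John Doe <jdoe@example.com>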
+
+ The filemap is a file that allows filtering and remapping of files
+ and directories. Each line can contain one of the following
+ directives::
+
+ include path/to/file-or-dir
+
+ exclude path/to/file-or-dir
+
+ rename path/to/source path/to/destination
+
+ Comment lines start with ``#``. A specified path matches if it
+ equals the full relative name of a file or one of its parent
+ directories. The ``include`` or ``exclude`` directive with the
+ longest matching path applies, so line order does not matter.
+
+ The ``include`` directive causes a file, or all files under a
+ directory, to be included in the destination repository, and the
+ exclusion of all other files and directories not explicitly
+ included. The ``exclude`` directive causes files or directories to
+ be omitted. The ``rename`` directive renames a file or directory if
+ it is converted. To rename from a subdirectory into the root of
+ the repository, use ``.`` as the path to rename to.
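+
+    For example, the following filemap converts only the ``src``
+    directory, drops its ``tests`` subdirectory and moves the remaining
+    files to the repository root (paths are illustrative)::
+
+      include src
+      exclude src/tests
+      rename src .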
+
+ The splicemap is a file that allows insertion of synthetic
+ history, letting you specify the parents of a revision. This is
+ useful if you want to e.g. give a Subversion merge two parents, or
+ graft two disconnected series of history together. Each entry
+ contains a key, followed by a space, followed by one or two
+ comma-separated values::
+
+ key parent1, parent2
+
+ The key is the revision ID in the source
+ revision control system whose parents should be modified (same
+ format as a key in .hg/shamap). The values are the revision IDs
+ (in either the source or destination revision control system) that
+ should be used as the new parents for that node. For example, if
+ you have merged "release-1.0" into "trunk", then you should
+ specify the revision on "trunk" as the first parent and the one on
+ the "release-1.0" branch as the second.
+
+ The branchmap is a file that allows you to rename a branch when it is
+ being brought in from whatever external repository. When used in
+ conjunction with a splicemap, it allows for a powerful combination
+ to help fix even the most badly mismanaged repositories and turn them
+ into nicely structured Mercurial repositories. The branchmap contains
+ lines of the form::
+
+ original_branch_name new_branch_name
+
+    where "original_branch_name" is the name of the branch in the
+    source repository, and "new_branch_name" is the name of the branch
+    in the destination repository. No whitespace is allowed in the
+    branch names. This can be used to (for instance) move code in one
+    repository from "default" to a named branch.
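+
+    For example, to make commits from the source's "default" branch
+    appear on a branch named "upstream" (an illustrative name)::
+
+      default upstream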
+
+ Mercurial Source
+ ################
+
+ The Mercurial source recognizes the following configuration
+ options, which you can set on the command line with ``--config``:
+
+ :convert.hg.ignoreerrors: ignore integrity errors when reading.
+ Use it to fix Mercurial repositories with missing revlogs, by
+ converting from and to Mercurial. Default is False.
+
+ :convert.hg.saverev: store original revision ID in changeset
+ (forces target IDs to change). It takes a boolean argument and
+ defaults to False.
+
+ :convert.hg.startrev: convert start revision and its descendants.
+        It takes a Mercurial revision identifier and defaults to 0.
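+
+    For example, to salvage a repository with damaged revlogs
+    (illustrative paths)::
+
+      hg convert --config convert.hg.ignoreerrors=True broken fixed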
+
+ CVS Source
+ ##########
+
+ CVS source will use a sandbox (i.e. a checked-out copy) from CVS
+ to indicate the starting point of what will be converted. Direct
+ access to the repository files is not needed, unless of course the
+ repository is ``:local:``. The conversion uses the top level
+ directory in the sandbox to find the CVS repository, and then uses
+ CVS rlog commands to find files to convert. This means that unless
+ a filemap is given, all files under the starting directory will be
+ converted, and that any directory reorganization in the CVS
+ sandbox is ignored.
+
+ The following options can be used with ``--config``:
+
+ :convert.cvsps.cache: Set to False to disable remote log caching,
+ for testing and debugging purposes. Default is True.
+
+    :convert.cvsps.fuzz: Specify the maximum time (in seconds) that is
+        allowed between commits with identical user and log message in
+        a single changeset. If very large files were checked in as
+        part of a changeset, the default may not be long enough.
+        The default is 60.
+
+ :convert.cvsps.mergeto: Specify a regular expression to which
+ commit log messages are matched. If a match occurs, then the
+ conversion process will insert a dummy revision merging the
+ branch on which this log message occurs to the branch
+ indicated in the regex. Default is ``{{mergetobranch
+ ([-\\w]+)}}``
+
+ :convert.cvsps.mergefrom: Specify a regular expression to which
+ commit log messages are matched. If a match occurs, then the
+ conversion process will add the most recent revision on the
+ branch indicated in the regex as the second parent of the
+ changeset. Default is ``{{mergefrombranch ([-\\w]+)}}``
+
+ :hook.cvslog: Specify a Python function to be called at the end of
+ gathering the CVS log. The function is passed a list with the
+ log entries, and can modify the entries in-place, or add or
+ delete them.
+
+ :hook.cvschangesets: Specify a Python function to be called after
+ the changesets are calculated from the CVS log. The
+ function is passed a list with the changeset entries, and can
+ modify the changesets in-place, or add or delete them.
+
+    An additional "debugcvsps" Mercurial command allows the builtin
+    changeset merging code to be run without doing a conversion. Its
+    parameters and output are similar to those of cvsps 2.1. Please see
+    the command help for more details.
+
+ Subversion Source
+ #################
+
+ Subversion source detects classical trunk/branches/tags layouts.
+ By default, the supplied ``svn://repo/path/`` source URL is
+ converted as a single branch. If ``svn://repo/path/trunk`` exists
+ it replaces the default branch. If ``svn://repo/path/branches``
+ exists, its subdirectories are listed as possible branches. If
+ ``svn://repo/path/tags`` exists, it is looked for tags referencing
+    ``svn://repo/path/tags`` exists, it is searched for tags referencing
+    converted branches. The default ``trunk``, ``branches`` and ``tags``
+    values can be overridden with the following options. Set them to paths
+    relative to the source URL, or leave them blank to disable auto
+
+ The following options can be set with ``--config``:
+
+ :convert.svn.branches: specify the directory containing branches.
+ The default is ``branches``.
+
+ :convert.svn.tags: specify the directory containing tags. The
+ default is ``tags``.
+
+ :convert.svn.trunk: specify the name of the trunk branch. The
+ default is ``trunk``.
+
+    Source history can be retrieved starting at a specific revision,
+    instead of being converted in its entirety. Only single branch
+    conversions are supported.
+
+ :convert.svn.startrev: specify start Subversion revision number.
+ The default is 0.
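+
+    For example, for a repository using a non-standard layout
+    (illustrative values)::
+
+      hg convert --config convert.svn.trunk=mainline \
+                 --config convert.svn.branches=releases svn://repo/path/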
+
+ Perforce Source
+ ###############
+
+ The Perforce (P4) importer can be given a p4 depot path or a
+ client specification as source. It will convert all files in the
+ source to a flat Mercurial repository, ignoring labels, branches
+    and integrations. Note that when a depot path is given, you usually
+    should also specify a target directory, because otherwise the
+    target may end up being named ``...-hg``.
+
+ It is possible to limit the amount of source history to be
+ converted by specifying an initial Perforce revision:
+
+ :convert.p4.startrev: specify initial Perforce revision (a
+ Perforce changelist number).
+
+ Mercurial Destination
+ #####################
+
+ The following options are supported:
+
+ :convert.hg.clonebranches: dispatch source branches in separate
+ clones. The default is False.
+
+ :convert.hg.tagsbranch: branch name for tag revisions, defaults to
+ ``default``.
+
+ :convert.hg.usebranchnames: preserve branch names. The default is
+ True.
+ """
+ return convcmd.convert(ui, src, dest, revmapfile, **opts)
+
+def debugsvnlog(ui, **opts):
+ return subversion.debugsvnlog(ui, **opts)
+
+def debugcvsps(ui, *args, **opts):
+ '''create changeset information from CVS
+
+ This command is intended as a debugging tool for the CVS to
+ Mercurial converter, and can be used as a direct replacement for
+ cvsps.
+
+    Hg debugcvsps reads the CVS rlog for the current directory (or any
+    named directory) in the CVS repository, and converts the log to a
+ series of changesets based on matching commit log entries and
+ dates.'''
+ return cvsps.debugcvsps(ui, *args, **opts)
+
+commands.norepo += " convert debugsvnlog debugcvsps"
+
+cmdtable = {
+ "convert":
+ (convert,
+ [('', 'authors', '',
+ _('username mapping filename (DEPRECATED, use --authormap instead)'),
+ _('FILE')),
+ ('s', 'source-type', '',
+ _('source repository type'), _('TYPE')),
+ ('d', 'dest-type', '',
+ _('destination repository type'), _('TYPE')),
+ ('r', 'rev', '',
+ _('import up to target revision REV'), _('REV')),
+ ('A', 'authormap', '',
+ _('remap usernames using this file'), _('FILE')),
+ ('', 'filemap', '',
+ _('remap file names using contents of file'), _('FILE')),
+ ('', 'splicemap', '',
+ _('splice synthesized history into place'), _('FILE')),
+ ('', 'branchmap', '',
+ _('change branch names while converting'), _('FILE')),
+ ('', 'branchsort', None, _('try to sort changesets by branches')),
+ ('', 'datesort', None, _('try to sort changesets by date')),
+ ('', 'sourcesort', None, _('preserve source changesets order'))],
+ _('hg convert [OPTION]... SOURCE [DEST [REVMAP]]')),
+ "debugsvnlog":
+ (debugsvnlog,
+ [],
+ 'hg debugsvnlog'),
+ "debugcvsps":
+ (debugcvsps,
+ [
+ # Main options shared with cvsps-2.1
+ ('b', 'branches', [], _('only return changes on specified branches')),
+ ('p', 'prefix', '', _('prefix to remove from file names')),
+ ('r', 'revisions', [],
+ _('only return changes after or between specified tags')),
+ ('u', 'update-cache', None, _("update cvs log cache")),
+ ('x', 'new-cache', None, _("create new cvs log cache")),
+ ('z', 'fuzz', 60, _('set commit time fuzz in seconds')),
+ ('', 'root', '', _('specify cvsroot')),
+ # Options specific to builtin cvsps
+ ('', 'parents', '', _('show parent changesets')),
+ ('', 'ancestors', '',
+ _('show current changeset in ancestor branches')),
+ # Options that are ignored for compatibility with cvsps-2.1
+ ('A', 'cvs-direct', None, _('ignored for compatibility')),
+ ],
+ _('hg debugcvsps [OPTION]... [PATH]...')),
+}
+
+def kwconverted(ctx, name):
+ rev = ctx.extra().get('convert_revision', '')
+ if rev.startswith('svn:'):
+ if name == 'svnrev':
+ return str(subversion.revsplit(rev)[2])
+ elif name == 'svnpath':
+ return subversion.revsplit(rev)[1]
+ elif name == 'svnuuid':
+ return subversion.revsplit(rev)[0]
+ return rev
+
+def kwsvnrev(repo, ctx, **args):
+ """:svnrev: String. Converted subversion revision number."""
+ return kwconverted(ctx, 'svnrev')
+
+def kwsvnpath(repo, ctx, **args):
+ """:svnpath: String. Converted subversion revision project path."""
+ return kwconverted(ctx, 'svnpath')
+
+def kwsvnuuid(repo, ctx, **args):
+ """:svnuuid: String. Converted subversion revision repository identifier."""
+ return kwconverted(ctx, 'svnuuid')
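+
+# Once the extension is enabled, these keywords can be used in log
+# templates, for example (illustrative): hg log --template '{svnrev}\n'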
+
+def extsetup(ui):
+ templatekw.keywords['svnrev'] = kwsvnrev
+ templatekw.keywords['svnpath'] = kwsvnpath
+ templatekw.keywords['svnuuid'] = kwsvnuuid
+
+# tell hggettext to extract docstrings from these functions:
+i18nfunctions = [kwsvnrev, kwsvnpath, kwsvnuuid]
diff --git a/hgext/convert/bzr.py b/hgext/convert/bzr.py
new file mode 100644
index 0000000..5eef902
--- /dev/null
+++ b/hgext/convert/bzr.py
@@ -0,0 +1,285 @@
+# bzr.py - bzr support for the convert extension
+#
+# Copyright 2008, 2009 Marek Kubica <marek@xivilization.net> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# This module handles 'bzr', which was formerly known as Bazaar-NG;
+# it cannot access 'baz' repositories, but those were never used very much.
+
+import os
+from mercurial import demandimport
+# these do not work with demandimport, blacklist
+demandimport.ignore.extend([
+ 'bzrlib.transactions',
+ 'bzrlib.urlutils',
+ 'ElementPath',
+ ])
+
+from mercurial.i18n import _
+from mercurial import util
+from common import NoRepo, commit, converter_source
+
+try:
+ # bazaar imports
+ from bzrlib import bzrdir, revision, errors
+ from bzrlib.revisionspec import RevisionSpec
+except ImportError:
+ pass
+
+supportedkinds = ('file', 'symlink')
+
+class bzr_source(converter_source):
+ """Reads Bazaar repositories by using the Bazaar Python libraries"""
+
+ def __init__(self, ui, path, rev=None):
+ super(bzr_source, self).__init__(ui, path, rev=rev)
+
+ if not os.path.exists(os.path.join(path, '.bzr')):
+ raise NoRepo(_('%s does not look like a Bazaar repository')
+ % path)
+
+ try:
+ # access bzrlib stuff
+ bzrdir
+ except NameError:
+ raise NoRepo(_('Bazaar modules could not be loaded'))
+
+ path = os.path.abspath(path)
+ self._checkrepotype(path)
+ try:
+ self.sourcerepo = bzrdir.BzrDir.open(path).open_repository()
+ except errors.NoRepositoryPresent:
+ raise NoRepo(_('%s does not look like a Bazaar repository')
+ % path)
+ self._parentids = {}
+
+ def _checkrepotype(self, path):
+        # Lightweight checkout detection is informational but probably
+        # fragile at the API level. It should not terminate the conversion.
+ try:
+ from bzrlib import bzrdir
+ dir = bzrdir.BzrDir.open_containing(path)[0]
+ try:
+ tree = dir.open_workingtree(recommend_upgrade=False)
+ branch = tree.branch
+ except (errors.NoWorkingTree, errors.NotLocalUrl):
+ tree = None
+ branch = dir.open_branch()
+ if (tree is not None and tree.bzrdir.root_transport.base !=
+ branch.bzrdir.root_transport.base):
+ self.ui.warn(_('warning: lightweight checkouts may cause '
+ 'conversion failures, try with a regular '
+ 'branch instead.\n'))
+ except Exception:
+ self.ui.note(_('bzr source type could not be determined\n'))
+
+ def before(self):
+ """Before the conversion begins, acquire a read lock
+ for all the operations that might need it. Fortunately
+ read locks don't block other reads or writes to the
+ repository, so this shouldn't have any impact on the usage of
+ the source repository.
+
+        The alternative would be locking on every operation that
+        needs locks (there are currently two: getting the file and
+        getting the parent map) and releasing immediately after,
+        but this approach can take up to 40% longer."""
+ self.sourcerepo.lock_read()
+
+ def after(self):
+ self.sourcerepo.unlock()
+
+ def _bzrbranches(self):
+ return self.sourcerepo.find_branches(using=True)
+
+ def getheads(self):
+ if not self.rev:
+ # Set using=True to avoid nested repositories (see issue3254)
+ heads = sorted([b.last_revision() for b in self._bzrbranches()])
+ else:
+ revid = None
+ for branch in self._bzrbranches():
+ try:
+ r = RevisionSpec.from_string(self.rev)
+ info = r.in_history(branch)
+                except errors.BzrError:
+                    # revision not found in this branch, try the next one
+                    continue
+                revid = info.rev_id
+ if revid is None:
+ raise util.Abort(_('%s is not a valid revision') % self.rev)
+ heads = [revid]
+ # Empty repositories return 'null:', which cannot be retrieved
+ heads = [h for h in heads if h != 'null:']
+ return heads
+
+ def getfile(self, name, rev):
+ revtree = self.sourcerepo.revision_tree(rev)
+ fileid = revtree.path2id(name.decode(self.encoding or 'utf-8'))
+ kind = None
+ if fileid is not None:
+ kind = revtree.kind(fileid)
+ if kind not in supportedkinds:
+ # the file is not available anymore - was deleted
+ raise IOError(_('%s is not available in %s anymore') %
+ (name, rev))
+ mode = self._modecache[(name, rev)]
+ if kind == 'symlink':
+ target = revtree.get_symlink_target(fileid)
+ if target is None:
+ raise util.Abort(_('%s.%s symlink has no target')
+ % (name, rev))
+ return target, mode
+ else:
+ sio = revtree.get_file(fileid)
+ return sio.read(), mode
+
+ def getchanges(self, version):
+ # set up caches: modecache and revtree
+ self._modecache = {}
+ self._revtree = self.sourcerepo.revision_tree(version)
+ # get the parentids from the cache
+ parentids = self._parentids.pop(version)
+ # only diff against first parent id
+ prevtree = self.sourcerepo.revision_tree(parentids[0])
+ return self._gettreechanges(self._revtree, prevtree)
+
+ def getcommit(self, version):
+ rev = self.sourcerepo.get_revision(version)
+ # populate parent id cache
+ if not rev.parent_ids:
+ parents = []
+ self._parentids[version] = (revision.NULL_REVISION,)
+ else:
+ parents = self._filterghosts(rev.parent_ids)
+ self._parentids[version] = parents
+
+ branch = self.recode(rev.properties.get('branch-nick', u'default'))
+ if branch == 'trunk':
+ branch = 'default'
+ return commit(parents=parents,
+ date='%d %d' % (rev.timestamp, -rev.timezone),
+ author=self.recode(rev.committer),
+ desc=self.recode(rev.message),
+ branch=branch,
+ rev=version)
+
+ def gettags(self):
+ bytetags = {}
+ for branch in self._bzrbranches():
+ if not branch.supports_tags():
+ return {}
+ tagdict = branch.tags.get_tag_dict()
+ for name, rev in tagdict.iteritems():
+ bytetags[self.recode(name)] = rev
+ return bytetags
+
+ def getchangedfiles(self, rev, i):
+ self._modecache = {}
+ curtree = self.sourcerepo.revision_tree(rev)
+ if i is not None:
+ parentid = self._parentids[rev][i]
+ else:
+ # no parent id, get the empty revision
+ parentid = revision.NULL_REVISION
+
+ prevtree = self.sourcerepo.revision_tree(parentid)
+ changes = [e[0] for e in self._gettreechanges(curtree, prevtree)[0]]
+ return changes
+
+ def _gettreechanges(self, current, origin):
+ revid = current._revision_id
+ changes = []
+ renames = {}
+ seen = set()
+ # Process the entries by reverse lexicographic name order to
+ # handle nested renames correctly, most specific first.
+ curchanges = sorted(current.iter_changes(origin),
+ key=lambda c: c[1][0] or c[1][1],
+ reverse=True)
+ for (fileid, paths, changed_content, versioned, parent, name,
+ kind, executable) in curchanges:
+
+ if paths[0] == u'' or paths[1] == u'':
+ # ignore changes to tree root
+ continue
+
+ # bazaar tracks directories, mercurial does not, so
+ # we have to rename the directory contents
+ if kind[1] == 'directory':
+ if kind[0] not in (None, 'directory'):
+ # Replacing 'something' with a directory, record it
+ # so it can be removed.
+ changes.append((self.recode(paths[0]), revid))
+
+ if kind[0] == 'directory' and None not in paths:
+ renaming = paths[0] != paths[1]
+                    # neither an add nor a delete - a move
+ # rename all directory contents manually
+ subdir = origin.inventory.path2id(paths[0])
+ # get all child-entries of the directory
+ for name, entry in origin.inventory.iter_entries(subdir):
+ # hg does not track directory renames
+ if entry.kind == 'directory':
+ continue
+ frompath = self.recode(paths[0] + '/' + name)
+ if frompath in seen:
+ # Already handled by a more specific change entry
+ # This is important when you have:
+ # a => b
+ # a/c => a/c
+ # Here a/c must not be renamed into b/c
+ continue
+ seen.add(frompath)
+ if not renaming:
+ continue
+ topath = self.recode(paths[1] + '/' + name)
+ # register the files as changed
+ changes.append((frompath, revid))
+ changes.append((topath, revid))
+ # add to mode cache
+                        mode = ((entry.executable and 'x')
+                                or (entry.kind == 'symlink' and 'l')
+                                or '')
+ self._modecache[(topath, revid)] = mode
+ # register the change as move
+ renames[topath] = frompath
+
+                    # no further changes, go to the next change
+ continue
+
+ # we got unicode paths, need to convert them
+ path, topath = paths
+ if path is not None:
+ path = self.recode(path)
+ if topath is not None:
+ topath = self.recode(topath)
+ seen.add(path or topath)
+
+ if topath is None:
+ # file deleted
+ changes.append((path, revid))
+ continue
+
+ # renamed
+ if path and path != topath:
+ renames[topath] = path
+ changes.append((path, revid))
+
+ # populate the mode cache
+ kind, executable = [e[1] for e in (kind, executable)]
+ mode = ((executable and 'x') or (kind == 'symlink' and 'l')
+ or '')
+ self._modecache[(topath, revid)] = mode
+ changes.append((topath, revid))
+
+ return changes, renames
+
+ def _filterghosts(self, ids):
+ """Filters out ghost revisions which hg does not support, see
+ <http://bazaar-vcs.org/GhostRevision>
+ """
+ parentmap = self.sourcerepo.get_parent_map(ids)
+ parents = tuple([parent for parent in ids if parent in parentmap])
+ return parents
diff --git a/hgext/convert/common.py b/hgext/convert/common.py
new file mode 100644
index 0000000..e30ef2d
--- /dev/null
+++ b/hgext/convert/common.py
@@ -0,0 +1,445 @@
+# common.py - common code for the convert extension
+#
+# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import base64, errno
+import os
+import cPickle as pickle
+from mercurial import util
+from mercurial.i18n import _
+
+propertycache = util.propertycache
+
+def encodeargs(args):
+    def encodearg(s):
+        # base64.encodestring inserts a newline every 76 characters
+        # (and a trailing one); strip them all
+        return ''.join(base64.encodestring(s).splitlines())
+
+ s = pickle.dumps(args)
+ return encodearg(s)
+
+def decodeargs(s):
+ s = base64.decodestring(s)
+ return pickle.loads(s)
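+
+# encodeargs/decodeargs round-trip picklable values through a single
+# line of text, e.g. (a sketch): decodeargs(encodeargs([1, 'a'])) == [1, 'a']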
+
+class MissingTool(Exception):
+ pass
+
+def checktool(exe, name=None, abort=True):
+ name = name or exe
+ if not util.findexe(exe):
+ exc = abort and util.Abort or MissingTool
+ raise exc(_('cannot find required "%s" tool') % name)
+
+class NoRepo(Exception):
+ pass
+
+SKIPREV = 'SKIP'
+
+class commit(object):
+ def __init__(self, author, date, desc, parents, branch=None, rev=None,
+ extra={}, sortkey=None):
+ self.author = author or 'unknown'
+ self.date = date or '0 0'
+ self.desc = desc
+ self.parents = parents
+ self.branch = branch
+ self.rev = rev
+ self.extra = extra
+ self.sortkey = sortkey
+
+class converter_source(object):
+ """Conversion source interface"""
+
+ def __init__(self, ui, path=None, rev=None):
+ """Initialize conversion source (or raise NoRepo("message")
+ exception if path is not a valid repository)"""
+ self.ui = ui
+ self.path = path
+ self.rev = rev
+
+ self.encoding = 'utf-8'
+
+ def before(self):
+ pass
+
+ def after(self):
+ pass
+
+ def setrevmap(self, revmap):
+ """set the map of already-converted revisions"""
+ pass
+
+ def getheads(self):
+ """Return a list of this repository's heads"""
+ raise NotImplementedError
+
+ def getfile(self, name, rev):
+ """Return a pair (data, mode) where data is the file content
+ as a string and mode one of '', 'x' or 'l'. rev is the
+ identifier returned by a previous call to getchanges(). Raise
+ IOError to indicate that name was deleted in rev.
+ """
+ raise NotImplementedError
+
+ def getchanges(self, version):
+ """Returns a tuple of (files, copies).
+
+ files is a sorted list of (filename, id) tuples for all files
+ changed between version and its first parent returned by
+ getcommit(). id is the source revision id of the file.
+
+ copies is a dictionary of dest: source
+ """
+ raise NotImplementedError
+
+ def getcommit(self, version):
+ """Return the commit object for version"""
+ raise NotImplementedError
+
+ def gettags(self):
+ """Return the tags as a dictionary of name: revision
+
+ Tag names must be UTF-8 strings.
+ """
+ raise NotImplementedError
+
+ def recode(self, s, encoding=None):
+ if not encoding:
+ encoding = self.encoding or 'utf-8'
+
+ if isinstance(s, unicode):
+ return s.encode("utf-8")
+ try:
+ return s.decode(encoding).encode("utf-8")
+ except UnicodeError:
+ try:
+ return s.decode("latin-1").encode("utf-8")
+ except UnicodeError:
+ return s.decode(encoding, "replace").encode("utf-8")
+
+ def getchangedfiles(self, rev, i):
+ """Return the files changed by rev compared to parent[i].
+
+ i is an index selecting one of the parents of rev. The return
+ value should be the list of files that are different in rev and
+ this parent.
+
+ If rev has no parents, i is None.
+
+ This function is only needed to support --filemap
+ """
+ raise NotImplementedError
+
+ def converted(self, rev, sinkrev):
+ '''Notify the source that a revision has been converted.'''
+ pass
+
+ def hasnativeorder(self):
+ """Return true if this source has a meaningful, native revision
+        order. For instance, Mercurial revisions are stored sequentially
+ while there is no such global ordering with Darcs.
+ """
+ return False
+
+ def lookuprev(self, rev):
+ """If rev is a meaningful revision reference in source, return
+ the referenced identifier in the same format used by getcommit().
+        Return None otherwise.
+ """
+ return None
+
+ def getbookmarks(self):
+ """Return the bookmarks as a dictionary of name: revision
+
+ Bookmark names are to be UTF-8 strings.
+ """
+ return {}
+
+class converter_sink(object):
+ """Conversion sink (target) interface"""
+
+ def __init__(self, ui, path):
+ """Initialize conversion sink (or raise NoRepo("message")
+ exception if path is not a valid repository)
+
+ created is a list of paths to remove if a fatal error occurs
+ later"""
+ self.ui = ui
+ self.path = path
+ self.created = []
+
+ def getheads(self):
+ """Return a list of this repository's heads"""
+ raise NotImplementedError
+
+ def revmapfile(self):
+ """Path to a file that will contain lines
+ source_rev_id sink_rev_id
+ mapping equivalent revision identifiers for each system."""
+ raise NotImplementedError
+
+ def authorfile(self):
+ """Path to a file that will contain lines
+ srcauthor=dstauthor
+        mapping equivalent author identifiers for each system."""
+ return None
+
+ def putcommit(self, files, copies, parents, commit, source, revmap):
+ """Create a revision with all changed files listed in 'files'
+ and having listed parents. 'commit' is a commit object
+ containing at a minimum the author, date, and message for this
+ changeset. 'files' is a list of (path, version) tuples,
+ 'copies' is a dictionary mapping destinations to sources,
+ 'source' is the source repository, and 'revmap' is a mapfile
+ of source revisions to converted revisions. Only getfile() and
+ lookuprev() should be called on 'source'.
+
+ Note that the sink repository is not told to update itself to
+ a particular revision (or even what that revision would be)
+ before it receives the file data.
+ """
+ raise NotImplementedError
+
+ def puttags(self, tags):
+ """Put tags into sink.
+
+        tags: {tagname: sink_rev_id, ...} where tagname is a UTF-8 string.
+ Return a pair (tag_revision, tag_parent_revision), or (None, None)
+ if nothing was changed.
+ """
+ raise NotImplementedError
+
+ def setbranch(self, branch, pbranches):
+ """Set the current branch name. Called before the first putcommit
+ on the branch.
+ branch: branch name for subsequent commits
+ pbranches: (converted parent revision, parent branch) tuples"""
+ pass
+
+ def setfilemapmode(self, active):
+ """Tell the destination that we're using a filemap
+
+ Some converter_sources (svn in particular) can claim that a file
+ was changed in a revision, even if there was no change. This method
+ tells the destination that we're using a filemap and that it should
+ filter empty revisions.
+ """
+ pass
+
+ def before(self):
+ pass
+
+ def after(self):
+ pass
+
+ def putbookmarks(self, bookmarks):
+ """Put bookmarks into sink.
+
+ bookmarks: {bookmarkname: sink_rev_id, ...}
+        where bookmarkname is a UTF-8 string.
+ """
+ pass
+
+ def hascommit(self, rev):
+ """Return True if the sink contains rev"""
+ raise NotImplementedError
+
+class commandline(object):
+ def __init__(self, ui, command):
+ self.ui = ui
+ self.command = command
+
+ def prerun(self):
+ pass
+
+ def postrun(self):
+ pass
+
+ def _cmdline(self, cmd, closestdin, *args, **kwargs):
+ cmdline = [self.command, cmd] + list(args)
+ for k, v in kwargs.iteritems():
+ if len(k) == 1:
+ cmdline.append('-' + k)
+ else:
+ cmdline.append('--' + k.replace('_', '-'))
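+            # if the value is not a string (e.g. None or True for a
+            # valueless option), the concatenation below raises TypeError
+            # and the option is emitted as a bare flag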
+ try:
+ if len(k) == 1:
+ cmdline.append('' + v)
+ else:
+ cmdline[-1] += '=' + v
+ except TypeError:
+ pass
+ cmdline = [util.shellquote(arg) for arg in cmdline]
+ if not self.ui.debugflag:
+ cmdline += ['2>', util.nulldev]
+ if closestdin:
+ cmdline += ['<', util.nulldev]
+ cmdline = ' '.join(cmdline)
+ return cmdline
+
+ def _run(self, cmd, *args, **kwargs):
+ return self._dorun(util.popen, cmd, True, *args, **kwargs)
+
+ def _run2(self, cmd, *args, **kwargs):
+ return self._dorun(util.popen2, cmd, False, *args, **kwargs)
+
+ def _dorun(self, openfunc, cmd, closestdin, *args, **kwargs):
+ cmdline = self._cmdline(cmd, closestdin, *args, **kwargs)
+ self.ui.debug('running: %s\n' % (cmdline,))
+ self.prerun()
+ try:
+ return openfunc(cmdline)
+ finally:
+ self.postrun()
+
+ def run(self, cmd, *args, **kwargs):
+ fp = self._run(cmd, *args, **kwargs)
+ output = fp.read()
+ self.ui.debug(output)
+ return output, fp.close()
+
+ def runlines(self, cmd, *args, **kwargs):
+ fp = self._run(cmd, *args, **kwargs)
+ output = fp.readlines()
+ self.ui.debug(''.join(output))
+ return output, fp.close()
+
+ def checkexit(self, status, output=''):
+ if status:
+ if output:
+ self.ui.warn(_('%s error:\n') % self.command)
+ self.ui.warn(output)
+ msg = util.explainexit(status)[0]
+ raise util.Abort('%s %s' % (self.command, msg))
+
+ def run0(self, cmd, *args, **kwargs):
+ output, status = self.run(cmd, *args, **kwargs)
+ self.checkexit(status, output)
+ return output
+
+ def runlines0(self, cmd, *args, **kwargs):
+ output, status = self.runlines(cmd, *args, **kwargs)
+ self.checkexit(status, ''.join(output))
+ return output
+
+ @propertycache
+ def argmax(self):
+ # POSIX requires at least 4096 bytes for ARG_MAX
+ argmax = 4096
+ try:
+ argmax = os.sysconf("SC_ARG_MAX")
+ except (AttributeError, ValueError):
+ pass
+
+ # Windows shells impose their own limits on command line length,
+ # down to 2047 bytes for cmd.exe under Windows NT/2k and 2500 bytes
+ # for older 4nt.exe. See http://support.microsoft.com/kb/830473 for
+ # details about cmd.exe limitations.
+
+ # Since ARG_MAX is for command line _and_ environment, lower our limit
+        # (and make Windows shells happy while doing this).
+ return argmax // 2 - 1
+
+ def limit_arglist(self, arglist, cmd, closestdin, *args, **kwargs):
+ cmdlen = len(self._cmdline(cmd, closestdin, *args, **kwargs))
+ limit = self.argmax - cmdlen
+ bytes = 0
+ fl = []
+ for fn in arglist:
+ b = len(fn) + 3
+ if bytes + b < limit or len(fl) == 0:
+ fl.append(fn)
+ bytes += b
+ else:
+ yield fl
+ fl = [fn]
+ bytes = b
+ if fl:
+ yield fl
+
+ def xargs(self, arglist, cmd, *args, **kwargs):
+ for l in self.limit_arglist(arglist, cmd, True, *args, **kwargs):
+ self.run0(cmd, *(list(args) + l), **kwargs)
+
+class mapfile(dict):
+ def __init__(self, ui, path):
+ super(mapfile, self).__init__()
+ self.ui = ui
+ self.path = path
+ self.fp = None
+ self.order = []
+ self._read()
+
+ def _read(self):
+ if not self.path:
+ return
+ try:
+ fp = open(self.path, 'r')
+ except IOError, err:
+ if err.errno != errno.ENOENT:
+ raise
+ return
+ for i, line in enumerate(fp):
+ line = line.splitlines()[0].rstrip()
+ if not line:
+ # Ignore blank lines
+ continue
+ try:
+ key, value = line.rsplit(' ', 1)
+ except ValueError:
+ raise util.Abort(
+ _('syntax error in %s(%d): key/value pair expected')
+ % (self.path, i + 1))
+ if key not in self:
+ self.order.append(key)
+ super(mapfile, self).__setitem__(key, value)
+ fp.close()
+
+ def __setitem__(self, key, value):
+ if self.fp is None:
+ try:
+ self.fp = open(self.path, 'a')
+ except IOError, err:
+ raise util.Abort(_('could not open map file %r: %s') %
+ (self.path, err.strerror))
+ self.fp.write('%s %s\n' % (key, value))
+ self.fp.flush()
+ super(mapfile, self).__setitem__(key, value)
+
+ def close(self):
+ if self.fp:
+ self.fp.close()
+ self.fp = None
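+
+# mapfile behaves like a dict whose assignments are also appended to the
+# backing file as 'key value' lines, which is what lets interrupted
+# conversions resume; e.g. (a sketch): m = mapfile(ui, path); m[src] = dst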
+
+def parsesplicemap(path):
+ """Parse a splicemap, return a child/parents dictionary."""
+ if not path:
+ return {}
+ m = {}
+ try:
+ fp = open(path, 'r')
+ for i, line in enumerate(fp):
+ line = line.splitlines()[0].rstrip()
+ if not line:
+ # Ignore blank lines
+ continue
+ try:
+ child, parents = line.split(' ', 1)
+ parents = parents.replace(',', ' ').split()
+ except ValueError:
+ raise util.Abort(_('syntax error in %s(%d): child parent1'
+ '[,parent2] expected') % (path, i + 1))
+ pp = []
+ for p in parents:
+ if p not in pp:
+ pp.append(p)
+ m[child] = pp
+ except IOError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ return m
diff --git a/hgext/convert/convcmd.py b/hgext/convert/convcmd.py
new file mode 100644
index 0000000..c8fe845
--- /dev/null
+++ b/hgext/convert/convcmd.py
@@ -0,0 +1,470 @@
+# convcmd - convert extension commands definition
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from common import NoRepo, MissingTool, SKIPREV, mapfile
+from cvs import convert_cvs
+from darcs import darcs_source
+from git import convert_git
+from hg import mercurial_source, mercurial_sink
+from subversion import svn_source, svn_sink
+from monotone import monotone_source
+from gnuarch import gnuarch_source
+from bzr import bzr_source
+from p4 import p4_source
+import filemap, common
+
+import os, shutil
+from mercurial import hg, util, encoding
+from mercurial.i18n import _
+
+orig_encoding = 'ascii'
+
+def recode(s):
+ if isinstance(s, unicode):
+ return s.encode(orig_encoding, 'replace')
+ else:
+ return s.decode('utf-8').encode(orig_encoding, 'replace')
+
+source_converters = [
+ ('cvs', convert_cvs, 'branchsort'),
+ ('git', convert_git, 'branchsort'),
+ ('svn', svn_source, 'branchsort'),
+ ('hg', mercurial_source, 'sourcesort'),
+ ('darcs', darcs_source, 'branchsort'),
+ ('mtn', monotone_source, 'branchsort'),
+ ('gnuarch', gnuarch_source, 'branchsort'),
+ ('bzr', bzr_source, 'branchsort'),
+ ('p4', p4_source, 'branchsort'),
+ ]
+
+sink_converters = [
+ ('hg', mercurial_sink),
+ ('svn', svn_sink),
+ ]
+
+def convertsource(ui, path, type, rev):
+ exceptions = []
+ if type and type not in [s[0] for s in source_converters]:
+ raise util.Abort(_('%s: invalid source repository type') % type)
+ for name, source, sortmode in source_converters:
+ try:
+ if not type or name == type:
+ return source(ui, path, rev), sortmode
+ except (NoRepo, MissingTool), inst:
+ exceptions.append(inst)
+ if not ui.quiet:
+ for inst in exceptions:
+ ui.write("%s\n" % inst)
+ raise util.Abort(_('%s: missing or unsupported repository') % path)
+
+def convertsink(ui, path, type):
+ if type and type not in [s[0] for s in sink_converters]:
+ raise util.Abort(_('%s: invalid destination repository type') % type)
+ for name, sink in sink_converters:
+ try:
+ if not type or name == type:
+ return sink(ui, path)
+ except NoRepo, inst:
+ ui.note(_("convert: %s\n") % inst)
+ except MissingTool, inst:
+ raise util.Abort('%s\n' % inst)
+ raise util.Abort(_('%s: unknown repository type') % path)
+
+class progresssource(object):
+ def __init__(self, ui, source, filecount):
+ self.ui = ui
+ self.source = source
+ self.filecount = filecount
+ self.retrieved = 0
+
+ def getfile(self, file, rev):
+ self.retrieved += 1
+ self.ui.progress(_('getting files'), self.retrieved,
+ item=file, total=self.filecount)
+ return self.source.getfile(file, rev)
+
+ def lookuprev(self, rev):
+ return self.source.lookuprev(rev)
+
+ def close(self):
+ self.ui.progress(_('getting files'), None)
+
+class converter(object):
+ def __init__(self, ui, source, dest, revmapfile, opts):
+
+ self.source = source
+ self.dest = dest
+ self.ui = ui
+ self.opts = opts
+ self.commitcache = {}
+ self.authors = {}
+ self.authorfile = None
+
+ # Record converted revisions persistently: maps source revision
+ # ID to target revision ID (both strings). (This is how
+ # incremental conversions work.)
+ self.map = mapfile(ui, revmapfile)
+
+ # Read first the dst author map if any
+ authorfile = self.dest.authorfile()
+ if authorfile and os.path.exists(authorfile):
+ self.readauthormap(authorfile)
+ # Extend/Override with new author map if necessary
+ if opts.get('authormap'):
+ self.readauthormap(opts.get('authormap'))
+ self.authorfile = self.dest.authorfile()
+
+ self.splicemap = common.parsesplicemap(opts.get('splicemap'))
+ self.branchmap = mapfile(ui, opts.get('branchmap'))
+
+ def walktree(self, heads):
+ '''Return a mapping that identifies the uncommitted parents of every
+ uncommitted changeset.'''
+ visit = heads
+ known = set()
+ parents = {}
+ while visit:
+ n = visit.pop(0)
+ if n in known or n in self.map:
+ continue
+ known.add(n)
+ self.ui.progress(_('scanning'), len(known), unit=_('revisions'))
+ commit = self.cachecommit(n)
+ parents[n] = []
+ for p in commit.parents:
+ parents[n].append(p)
+ visit.append(p)
+ self.ui.progress(_('scanning'), None)
+
+ return parents
+
+ def mergesplicemap(self, parents, splicemap):
+        """A splicemap redefines child/parent relationships. Check that
+        the map contains valid revision identifiers and merge the new
+        links into the source graph.
+ """
+ for c in splicemap:
+ if c not in parents:
+ if not self.dest.hascommit(self.map.get(c, c)):
+ # Could be in source but not converted during this run
+ self.ui.warn(_('splice map revision %s is not being '
+ 'converted, ignoring\n') % c)
+ continue
+ pc = []
+ for p in splicemap[c]:
+ # We do not have to wait for nodes already in dest.
+ if self.dest.hascommit(self.map.get(p, p)):
+ continue
+ # Parent is not in dest and not being converted, not good
+ if p not in parents:
+ raise util.Abort(_('unknown splice map parent: %s') % p)
+ pc.append(p)
+ parents[c] = pc
+
+ def toposort(self, parents, sortmode):
+        '''Return an ordering such that every uncommitted changeset is
+        preceded by all its uncommitted ancestors.'''
+
+ def mapchildren(parents):
+            """Return a (children, roots) tuple where 'children' maps parent
+            revision identifiers to lists of their children, and 'roots' is
+            the list of revisions without parents. 'parents' must be a
+            mapping of revision identifiers to their parents.
+ """
+ visit = parents.keys()
+ seen = set()
+ children = {}
+ roots = []
+
+ while visit:
+ n = visit.pop(0)
+ if n in seen:
+ continue
+ seen.add(n)
+ # Ensure that nodes without parents are present in the
+ # 'children' mapping.
+ children.setdefault(n, [])
+ hasparent = False
+ for p in parents[n]:
+ if p not in self.map:
+ visit.append(p)
+ hasparent = True
+ children.setdefault(p, []).append(n)
+ if not hasparent:
+ roots.append(n)
+
+ return children, roots
+
+ # Sort functions are supposed to take a list of revisions which
+ # can be converted immediately and pick one
+
+ def makebranchsorter():
+ """If the previously converted revision has a child in the
+ eligible revisions list, pick it. Return the list head
+ otherwise. Branch sort attempts to minimize branch
+ switching, which is harmful for Mercurial backend
+ compression.
+ """
+ prev = [None]
+ def picknext(nodes):
+ next = nodes[0]
+ for n in nodes:
+ if prev[0] in parents[n]:
+ next = n
+ break
+ prev[0] = next
+ return next
+ return picknext
+
+ def makesourcesorter():
+ """Source specific sort."""
+ keyfn = lambda n: self.commitcache[n].sortkey
+ def picknext(nodes):
+ return sorted(nodes, key=keyfn)[0]
+ return picknext
+
+ def makedatesorter():
+ """Sort revisions by date."""
+ dates = {}
+ def getdate(n):
+ if n not in dates:
+ dates[n] = util.parsedate(self.commitcache[n].date)
+ return dates[n]
+
+ def picknext(nodes):
+ return min([(getdate(n), n) for n in nodes])[1]
+
+ return picknext
+
+ if sortmode == 'branchsort':
+ picknext = makebranchsorter()
+ elif sortmode == 'datesort':
+ picknext = makedatesorter()
+ elif sortmode == 'sourcesort':
+ picknext = makesourcesorter()
+ else:
+ raise util.Abort(_('unknown sort mode: %s') % sortmode)
+
+ children, actives = mapchildren(parents)
+
+ s = []
+ pendings = {}
+ while actives:
+ n = picknext(actives)
+ actives.remove(n)
+ s.append(n)
+
+ # Update dependents list
+ for c in children.get(n, []):
+ if c not in pendings:
+ pendings[c] = [p for p in parents[c] if p not in self.map]
+ try:
+ pendings[c].remove(n)
+ except ValueError:
+ raise util.Abort(_('cycle detected between %s and %s')
+ % (recode(c), recode(n)))
+ if not pendings[c]:
+ # Parents are converted, node is eligible
+ actives.insert(0, c)
+ pendings[c] = None
+
+ if len(s) != len(parents):
+ raise util.Abort(_("not all revisions were sorted"))
+
+ return s
+
+ def writeauthormap(self):
+ authorfile = self.authorfile
+ if authorfile:
+ self.ui.status(_('writing author map file %s\n') % authorfile)
+ ofile = open(authorfile, 'w+')
+ for author in self.authors:
+ ofile.write("%s=%s\n" % (author, self.authors[author]))
+ ofile.close()
+
+ def readauthormap(self, authorfile):
+ afile = open(authorfile, 'r')
+ for line in afile:
+
+ line = line.strip()
+ if not line or line.startswith('#'):
+ continue
+
+ try:
+ srcauthor, dstauthor = line.split('=', 1)
+ except ValueError:
+ msg = _('ignoring bad line in author map file %s: %s\n')
+ self.ui.warn(msg % (authorfile, line.rstrip()))
+ continue
+
+ srcauthor = srcauthor.strip()
+ dstauthor = dstauthor.strip()
+ if self.authors.get(srcauthor) in (None, dstauthor):
+ msg = _('mapping author %s to %s\n')
+ self.ui.debug(msg % (srcauthor, dstauthor))
+ self.authors[srcauthor] = dstauthor
+ continue
+
+ m = _('overriding mapping for author %s, was %s, will be %s\n')
+ self.ui.status(m % (srcauthor, self.authors[srcauthor], dstauthor))
+
+ afile.close()
+
+ def cachecommit(self, rev):
+ commit = self.source.getcommit(rev)
+ commit.author = self.authors.get(commit.author, commit.author)
+ commit.branch = self.branchmap.get(commit.branch, commit.branch)
+ self.commitcache[rev] = commit
+ return commit
+
+ def copy(self, rev):
+ commit = self.commitcache[rev]
+
+ changes = self.source.getchanges(rev)
+ if isinstance(changes, basestring):
+ if changes == SKIPREV:
+ dest = SKIPREV
+ else:
+ dest = self.map[changes]
+ self.map[rev] = dest
+ return
+ files, copies = changes
+ pbranches = []
+ if commit.parents:
+ for prev in commit.parents:
+ if prev not in self.commitcache:
+ self.cachecommit(prev)
+ pbranches.append((self.map[prev],
+ self.commitcache[prev].branch))
+ self.dest.setbranch(commit.branch, pbranches)
+ try:
+ parents = self.splicemap[rev]
+ self.ui.status(_('spliced in %s as parents of %s\n') %
+ (parents, rev))
+ parents = [self.map.get(p, p) for p in parents]
+ except KeyError:
+ parents = [b[0] for b in pbranches]
+ source = progresssource(self.ui, self.source, len(files))
+ newnode = self.dest.putcommit(files, copies, parents, commit,
+ source, self.map)
+ source.close()
+ self.source.converted(rev, newnode)
+ self.map[rev] = newnode
+
+ def convert(self, sortmode):
+ try:
+ self.source.before()
+ self.dest.before()
+ self.source.setrevmap(self.map)
+ self.ui.status(_("scanning source...\n"))
+ heads = self.source.getheads()
+ parents = self.walktree(heads)
+ self.mergesplicemap(parents, self.splicemap)
+ self.ui.status(_("sorting...\n"))
+ t = self.toposort(parents, sortmode)
+ num = len(t)
+ c = None
+
+ self.ui.status(_("converting...\n"))
+ for i, c in enumerate(t):
+ num -= 1
+ desc = self.commitcache[c].desc
+ if "\n" in desc:
+ desc = desc.splitlines()[0]
+ # convert log message to local encoding without using
+ # tolocal() because the encoding.encoding convert()
+ # uses is 'utf-8'
+ self.ui.status("%d %s\n" % (num, recode(desc)))
+ self.ui.note(_("source: %s\n") % recode(c))
+ self.ui.progress(_('converting'), i, unit=_('revisions'),
+ total=len(t))
+ self.copy(c)
+ self.ui.progress(_('converting'), None)
+
+ tags = self.source.gettags()
+ ctags = {}
+ for k in tags:
+ v = tags[k]
+ if self.map.get(v, SKIPREV) != SKIPREV:
+ ctags[k] = self.map[v]
+
+ if c and ctags:
+ nrev, tagsparent = self.dest.puttags(ctags)
+ if nrev and tagsparent:
+ # write another hash correspondence to override the previous
+ # one so we don't end up with extra tag heads
+ tagsparents = [e for e in self.map.iteritems()
+ if e[1] == tagsparent]
+ if tagsparents:
+ self.map[tagsparents[0][0]] = nrev
+
+ bookmarks = self.source.getbookmarks()
+ cbookmarks = {}
+ for k in bookmarks:
+ v = bookmarks[k]
+ if self.map.get(v, SKIPREV) != SKIPREV:
+ cbookmarks[k] = self.map[v]
+
+ if c and cbookmarks:
+ self.dest.putbookmarks(cbookmarks)
+
+ self.writeauthormap()
+ finally:
+ self.cleanup()
+
+ def cleanup(self):
+ try:
+ self.dest.after()
+ finally:
+ self.source.after()
+ self.map.close()
+
+def convert(ui, src, dest=None, revmapfile=None, **opts):
+ global orig_encoding
+ orig_encoding = encoding.encoding
+ encoding.encoding = 'UTF-8'
+
+ # support --authors as an alias for --authormap
+ if not opts.get('authormap'):
+ opts['authormap'] = opts.get('authors')
+
+ if not dest:
+ dest = hg.defaultdest(src) + "-hg"
+ ui.status(_("assuming destination %s\n") % dest)
+
+ destc = convertsink(ui, dest, opts.get('dest_type'))
+
+ try:
+ srcc, defaultsort = convertsource(ui, src, opts.get('source_type'),
+ opts.get('rev'))
+ except Exception:
+ for path in destc.created:
+ shutil.rmtree(path, True)
+ raise
+
+ sortmodes = ('branchsort', 'datesort', 'sourcesort')
+ sortmode = [m for m in sortmodes if opts.get(m)]
+ if len(sortmode) > 1:
+ raise util.Abort(_('more than one sort mode specified'))
+ sortmode = sortmode and sortmode[0] or defaultsort
+ if sortmode == 'sourcesort' and not srcc.hasnativeorder():
+ raise util.Abort(_('--sourcesort is not supported by this data source'))
+
+ fmap = opts.get('filemap')
+ if fmap:
+ srcc = filemap.filemap_source(ui, srcc, fmap)
+ destc.setfilemapmode(True)
+
+ if not revmapfile:
+ try:
+ revmapfile = destc.revmapfile()
+ except Exception:
+ revmapfile = os.path.join(destc, "map")
+
+ c = converter(ui, srcc, destc, revmapfile, opts)
+ c.convert(sortmode)
+
diff --git a/hgext/convert/cvs.py b/hgext/convert/cvs.py
new file mode 100644
index 0000000..38b1d34
--- /dev/null
+++ b/hgext/convert/cvs.py
@@ -0,0 +1,272 @@
+# cvs.py: CVS conversion code inspired by hg-cvs-import and git-cvsimport
+#
+# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os, re, socket, errno
+from cStringIO import StringIO
+from mercurial import encoding, util
+from mercurial.i18n import _
+
+from common import NoRepo, commit, converter_source, checktool
+import cvsps
+
+class convert_cvs(converter_source):
+ def __init__(self, ui, path, rev=None):
+ super(convert_cvs, self).__init__(ui, path, rev=rev)
+
+ cvs = os.path.join(path, "CVS")
+ if not os.path.exists(cvs):
+ raise NoRepo(_("%s does not look like a CVS checkout") % path)
+
+ checktool('cvs')
+
+ self.changeset = None
+ self.files = {}
+ self.tags = {}
+ self.lastbranch = {}
+ self.socket = None
+ self.cvsroot = open(os.path.join(cvs, "Root")).read()[:-1]
+ self.cvsrepo = open(os.path.join(cvs, "Repository")).read()[:-1]
+ self.encoding = encoding.encoding
+
+ self._connect()
+
+ def _parse(self):
+ if self.changeset is not None:
+ return
+ self.changeset = {}
+
+ maxrev = 0
+ if self.rev:
+ # TODO: handle tags
+ try:
+ # patchset number?
+ maxrev = int(self.rev)
+ except ValueError:
+ raise util.Abort(_('revision %s is not a patchset number')
+ % self.rev)
+
+ d = os.getcwd()
+ try:
+ os.chdir(self.path)
+ id = None
+
+ cache = 'update'
+ if not self.ui.configbool('convert', 'cvsps.cache', True):
+ cache = None
+ db = cvsps.createlog(self.ui, cache=cache)
+ db = cvsps.createchangeset(self.ui, db,
+ fuzz=int(self.ui.config('convert', 'cvsps.fuzz', 60)),
+ mergeto=self.ui.config('convert', 'cvsps.mergeto', None),
+ mergefrom=self.ui.config('convert', 'cvsps.mergefrom', None))
+
+ for cs in db:
+ if maxrev and cs.id > maxrev:
+ break
+ id = str(cs.id)
+ cs.author = self.recode(cs.author)
+ self.lastbranch[cs.branch] = id
+ cs.comment = self.recode(cs.comment)
+ date = util.datestr(cs.date, '%Y-%m-%d %H:%M:%S %1%2')
+ self.tags.update(dict.fromkeys(cs.tags, id))
+
+ files = {}
+ for f in cs.entries:
+ files[f.file] = "%s%s" % ('.'.join([str(x)
+ for x in f.revision]),
+ ['', '(DEAD)'][f.dead])
+
+ # add current commit to set
+ c = commit(author=cs.author, date=date,
+ parents=[str(p.id) for p in cs.parents],
+ desc=cs.comment, branch=cs.branch or '')
+ self.changeset[id] = c
+ self.files[id] = files
+
+ self.heads = self.lastbranch.values()
+ finally:
+ os.chdir(d)
+
+ def _connect(self):
+ root = self.cvsroot
+ conntype = None
+ user, host = None, None
+ cmd = ['cvs', 'server']
+
+ self.ui.status(_("connecting to %s\n") % root)
+
+ if root.startswith(":pserver:"):
+ root = root[9:]
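+            # parse [user[:password]@]host[:port]/path/to/repository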
+ m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
+ root)
+ if m:
+ conntype = "pserver"
+ user, passw, serv, port, root = m.groups()
+ if not user:
+ user = "anonymous"
+ if not port:
+ port = 2401
+ else:
+ port = int(port)
+ format0 = ":pserver:%s@%s:%s" % (user, serv, root)
+ format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)
+
+ if not passw:
+ passw = "A"
+ cvspass = os.path.expanduser("~/.cvspass")
+ try:
+ pf = open(cvspass)
+ for line in pf.read().splitlines():
+ part1, part2 = line.split(' ', 1)
+ # /1 :pserver:user@example.com:2401/cvsroot/foo
+ # Ah<Z
+ if part1 == '/1':
+ part1, part2 = part2.split(' ', 1)
+ format = format1
+ # :pserver:user@example.com:/cvsroot/foo Ah<Z
+ else:
+ format = format0
+ if part1 == format:
+ passw = part2
+ break
+ pf.close()
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ if not getattr(inst, 'filename', None):
+ inst.filename = cvspass
+ raise
+
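+            # perform the pserver handshake: send the (scrambled)
+            # credentials and expect the server's "I LOVE YOU"
+            # acknowledgement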
+ sck = socket.socket()
+ sck.connect((serv, port))
+ sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
+ "END AUTH REQUEST", ""]))
+ if sck.recv(128) != "I LOVE YOU\n":
+ raise util.Abort(_("CVS pserver authentication failed"))
+
+ self.writep = self.readp = sck.makefile('r+')
+
+ if not conntype and root.startswith(":local:"):
+ conntype = "local"
+ root = root[7:]
+
+ if not conntype:
+ # :ext:user@host/home/user/path/to/cvsroot
+ if root.startswith(":ext:"):
+ root = root[5:]
+ m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
+            # Do not mistake a Windows path "c:\foo\bar" for a connection string
+ if os.path.isdir(root) or not m:
+ conntype = "local"
+ else:
+ conntype = "rsh"
+ user, host, root = m.group(1), m.group(2), m.group(3)
+
+ if conntype != "pserver":
+ if conntype == "rsh":
+ rsh = os.environ.get("CVS_RSH") or "ssh"
+ if user:
+ cmd = [rsh, '-l', user, host] + cmd
+ else:
+ cmd = [rsh, host] + cmd
+
+ # popen2 does not support argument lists under Windows
+ cmd = [util.shellquote(arg) for arg in cmd]
+ cmd = util.quotecommand(' '.join(cmd))
+ self.writep, self.readp = util.popen2(cmd)
+
+ self.realroot = root
+
+ self.writep.write("Root %s\n" % root)
+ self.writep.write("Valid-responses ok error Valid-requests Mode"
+ " M Mbinary E Checked-in Created Updated"
+ " Merged Removed\n")
+ self.writep.write("valid-requests\n")
+ self.writep.flush()
+ r = self.readp.readline()
+ if not r.startswith("Valid-requests"):
+ raise util.Abort(_('unexpected response from CVS server '
+ '(expected "Valid-requests", but got %r)')
+ % r)
+ if "UseUnchanged" in r:
+ self.writep.write("UseUnchanged\n")
+ self.writep.flush()
+ r = self.readp.readline()
+
+ def getheads(self):
+ self._parse()
+ return self.heads
+
+ def getfile(self, name, rev):
+
+ def chunkedread(fp, count):
+            # file objects returned by socket.makefile() do not handle
+            # large read() requests very well.
+ chunksize = 65536
+ output = StringIO()
+ while count > 0:
+ data = fp.read(min(count, chunksize))
+ if not data:
+ raise util.Abort(_("%d bytes missing from remote file")
+ % count)
+ count -= len(data)
+ output.write(data)
+ return output.getvalue()
+
+ self._parse()
+ if rev.endswith("(DEAD)"):
+ raise IOError
+
+ args = ("-N -P -kk -r %s --" % rev).split()
+ args.append(self.cvsrepo + '/' + name)
+ for x in args:
+ self.writep.write("Argument %s\n" % x)
+ self.writep.write("Directory .\n%s\nco\n" % self.realroot)
+ self.writep.flush()
+
+ data = ""
+ mode = None
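+        # parse the server's responses to the 'co' request: file data
+        # arrives in Created/Updated (text) or Mbinary (binary) replies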
+ while True:
+ line = self.readp.readline()
+ if line.startswith("Created ") or line.startswith("Updated "):
+ self.readp.readline() # path
+ self.readp.readline() # entries
+ mode = self.readp.readline()[:-1]
+ count = int(self.readp.readline()[:-1])
+ data = chunkedread(self.readp, count)
+ elif line.startswith(" "):
+ data += line[1:]
+ elif line.startswith("M "):
+ pass
+ elif line.startswith("Mbinary "):
+ count = int(self.readp.readline()[:-1])
+ data = chunkedread(self.readp, count)
+ else:
+ if line == "ok\n":
+ if mode is None:
+ raise util.Abort(_('malformed response from CVS'))
+ return (data, "x" in mode and "x" or "")
+ elif line.startswith("E "):
+ self.ui.warn(_("cvs server: %s\n") % line[2:])
+ elif line.startswith("Remove"):
+ self.readp.readline()
+ else:
+ raise util.Abort(_("unknown CVS response: %s") % line)
+
+ def getchanges(self, rev):
+ self._parse()
+ return sorted(self.files[rev].iteritems()), {}
+
+ def getcommit(self, rev):
+ self._parse()
+ return self.changeset[rev]
+
+ def gettags(self):
+ self._parse()
+ return self.tags
+
+ def getchangedfiles(self, rev, i):
+ self._parse()
+ return sorted(self.files[rev])
diff --git a/hgext/convert/cvsps.py b/hgext/convert/cvsps.py
new file mode 100644
index 0000000..97184d5
--- /dev/null
+++ b/hgext/convert/cvsps.py
@@ -0,0 +1,853 @@
+# Mercurial built-in replacement for cvsps.
+#
+# Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os
+import re
+import cPickle as pickle
+from mercurial import util
+from mercurial.i18n import _
+from mercurial import hook
+
+class logentry(object):
+ '''Class logentry has the following attributes:
+ .author - author name as CVS knows it
+ .branch - name of branch this revision is on
+ .branches - revision tuple of branches starting at this revision
+ .comment - commit message
+ .date - the commit date as a (time, tz) tuple
+ .dead - true if file revision is dead
+ .file - Name of file
+ .lines - a tuple (+lines, -lines) or None
+ .parent - Previous revision of this entry
+ .rcs - name of file as returned from CVS
+ .revision - revision number as tuple
+ .tags - list of tags on the file
+ .synthetic - is this a synthetic "file ... added on ..." revision?
+    .mergepoint - the branch that has been merged from
+                  (if present in rlog output)
+    .branchpoints - the branches that start at the current entry
+ '''
+ def __init__(self, **entries):
+ self.synthetic = False
+ self.__dict__.update(entries)
+
+ def __repr__(self):
+ return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
+ id(self),
+ self.file,
+ ".".join(map(str, self.revision)))
+
+class logerror(Exception):
+ pass
+
+def getrepopath(cvspath):
+ """Return the repository path from a CVS path.
+
+ >>> getrepopath('/foo/bar')
+ '/foo/bar'
+ >>> getrepopath('c:/foo/bar')
+ 'c:/foo/bar'
+ >>> getrepopath(':pserver:10/foo/bar')
+ '/foo/bar'
+ >>> getrepopath(':pserver:10c:/foo/bar')
+ '/foo/bar'
+ >>> getrepopath(':pserver:/foo/bar')
+ '/foo/bar'
+ >>> getrepopath(':pserver:c:/foo/bar')
+ 'c:/foo/bar'
+ >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
+ '/foo/bar'
+ >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
+ 'c:/foo/bar'
+ """
+ # According to CVS manual, CVS paths are expressed like:
+ # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
+ #
+ # Unfortunately, Windows absolute paths start with a drive letter
+ # like 'c:' making it harder to parse. Here we assume that drive
+ # letters are only one character long and any CVS component before
+ # the repository path is at least 2 characters long, and use this
+ # to disambiguate.
+ parts = cvspath.split(':')
+ if len(parts) == 1:
+ return parts[0]
+    # Here there is an ambiguous case if we have a port number
+    # immediately followed by a Windows drive letter. We assume this
+    # never happens and decide it must be a CVS path component,
+    # therefore ignoring it.
+ if len(parts[-2]) > 1:
+ return parts[-1].lstrip('0123456789')
+ return parts[-2] + ':' + parts[-1]
+
+def createlog(ui, directory=None, root="", rlog=True, cache=None):
+ '''Collect the CVS rlog'''
+
+ # Because we store many duplicate commit log messages, reusing strings
+ # saves a lot of memory and pickle storage space.
+ _scache = {}
+ def scache(s):
+ "return a shared version of a string"
+ return _scache.setdefault(s, s)
+
+ ui.status(_('collecting CVS rlog\n'))
+
+ log = [] # list of logentry objects containing the CVS state
+
+ # patterns to match in CVS (r)log output, by state of use
+ re_00 = re.compile('RCS file: (.+)$')
+ re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
+ re_02 = re.compile('cvs (r?log|server): (.+)\n$')
+ re_03 = re.compile("(Cannot access.+CVSROOT)|"
+ "(can't create temporary directory.+)$")
+ re_10 = re.compile('Working file: (.+)$')
+ re_20 = re.compile('symbolic names:')
+ re_30 = re.compile('\t(.+): ([\\d.]+)$')
+ re_31 = re.compile('----------------------------$')
+ re_32 = re.compile('======================================='
+ '======================================$')
+ re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
+ re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
+ r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
+ r'(.*mergepoint:\s+([^;]+);)?')
+ re_70 = re.compile('branches: (.+);$')
+
+ file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
+
+    prefix = '' # leading path to strip off what we get from CVS
+
+ if directory is None:
+ # Current working directory
+
+ # Get the real directory in the repository
+ try:
+ prefix = open(os.path.join('CVS','Repository')).read().strip()
+ directory = prefix
+ if prefix == ".":
+ prefix = ""
+ except IOError:
+ raise logerror(_('not a CVS sandbox'))
+
+ if prefix and not prefix.endswith(os.sep):
+ prefix += os.sep
+
+ # Use the Root file in the sandbox, if it exists
+ try:
+ root = open(os.path.join('CVS','Root')).read().strip()
+ except IOError:
+ pass
+
+ if not root:
+ root = os.environ.get('CVSROOT', '')
+
+ # read log cache if one exists
+ oldlog = []
+ date = None
+
+ if cache:
+ cachedir = os.path.expanduser('~/.hg.cvsps')
+ if not os.path.exists(cachedir):
+ os.mkdir(cachedir)
+
+ # The cvsps cache pickle needs a uniquified name, based on the
+ # repository location. The address may have all sort of nasties
+ # in it, slashes, colons and such. So here we take just the
+ # alphanumerics, concatenated in a way that does not mix up the
+ # various components, so that
+ # :pserver:user@server:/path
+ # and
+ # /pserver/user/server/path
+ # are mapped to different cache file names.
+ cachefile = root.split(":") + [directory, "cache"]
+ cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
+ cachefile = os.path.join(cachedir,
+ '.'.join([s for s in cachefile if s]))
+
+ if cache == 'update':
+ try:
+ ui.note(_('reading cvs log cache %s\n') % cachefile)
+ oldlog = pickle.load(open(cachefile))
+ ui.note(_('cache has %d log entries\n') % len(oldlog))
+ except Exception, e:
+ ui.note(_('error reading cache: %r\n') % e)
+
+ if oldlog:
+ date = oldlog[-1].date # last commit date as a (time,tz) tuple
+ date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
+
+ # build the CVS commandline
+ cmd = ['cvs', '-q']
+ if root:
+ cmd.append('-d%s' % root)
+ p = util.normpath(getrepopath(root))
+ if not p.endswith('/'):
+ p += '/'
+ if prefix:
+ # looks like normpath replaces "" by "."
+ prefix = p + util.normpath(prefix)
+ else:
+ prefix = p
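+    # index with the rlog flag: use 'rlog' when rlog is true, 'log' otherwise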
+ cmd.append(['log', 'rlog'][rlog])
+ if date:
+ # no space between option and date string
+ cmd.append('-d>%s' % date)
+ cmd.append(directory)
+
+ # state machine begins here
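+    # states: 0 = seek 'RCS file', 1 = expect 'Working file',
+    # 2 = seek 'symbolic names', 3 = read tags, 4 = expect separator,
+    # 5 = read revision number, 6 = read date/author line,
+    # 7 = read branches, 8 = accumulate the commit log message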
+ tags = {} # dictionary of revisions on current file with their tags
+ branchmap = {} # mapping between branch names and revision numbers
+ state = 0
+ store = False # set when a new record can be appended
+
+ cmd = [util.shellquote(arg) for arg in cmd]
+ ui.note(_("running %s\n") % (' '.join(cmd)))
+ ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
+
+ pfp = util.popen(' '.join(cmd))
+ peek = pfp.readline()
+ while True:
+ line = peek
+ if line == '':
+ break
+ peek = pfp.readline()
+ if line.endswith('\n'):
+ line = line[:-1]
+ #ui.debug('state=%d line=%r\n' % (state, line))
+
+ if state == 0:
+ # initial state, consume input until we see 'RCS file'
+ match = re_00.match(line)
+ if match:
+ rcs = match.group(1)
+ tags = {}
+ if rlog:
+ filename = util.normpath(rcs[:-2])
+ if filename.startswith(prefix):
+ filename = filename[len(prefix):]
+ if filename.startswith('/'):
+ filename = filename[1:]
+ if filename.startswith('Attic/'):
+ filename = filename[6:]
+ else:
+ filename = filename.replace('/Attic/', '/')
+ state = 2
+ continue
+ state = 1
+ continue
+ match = re_01.match(line)
+ if match:
+ raise logerror(match.group(1))
+ match = re_02.match(line)
+ if match:
+ raise logerror(match.group(2))
+ if re_03.match(line):
+ raise logerror(line)
+
+ elif state == 1:
+ # expect 'Working file' (only when using log instead of rlog)
+ match = re_10.match(line)
+ assert match, _('RCS file must be followed by working file')
+ filename = util.normpath(match.group(1))
+ state = 2
+
+ elif state == 2:
+ # expect 'symbolic names'
+ if re_20.match(line):
+ branchmap = {}
+ state = 3
+
+ elif state == 3:
+ # read the symbolic names and store as tags
+ match = re_30.match(line)
+ if match:
+ rev = [int(x) for x in match.group(2).split('.')]
+
+ # Convert magic branch number to an odd-numbered one
+ revn = len(rev)
+ if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
+ rev = rev[:-2] + rev[-1:]
+ rev = tuple(rev)
+
+ if rev not in tags:
+ tags[rev] = []
+ tags[rev].append(match.group(1))
+ branchmap[match.group(1)] = match.group(2)
+
+ elif re_31.match(line):
+ state = 5
+ elif re_32.match(line):
+ state = 0
+
+ elif state == 4:
+ # expecting '------' separator before first revision
+ if re_31.match(line):
+ state = 5
+ else:
+ assert not re_32.match(line), _('must have at least '
+ 'some revisions')
+
+ elif state == 5:
+ # expecting revision number and possibly (ignored) lock indication
+ # we create the logentry here from values stored in states 0 to 4,
+ # as this state is re-entered for subsequent revisions of a file.
+ match = re_50.match(line)
+ assert match, _('expected revision number')
+ e = logentry(rcs=scache(rcs), file=scache(filename),
+ revision=tuple([int(x) for x in match.group(1).split('.')]),
+ branches=[], parent=None)
+ state = 6
+
+ elif state == 6:
+ # expecting date, author, state, lines changed
+ match = re_60.match(line)
+ assert match, _('revision must be followed by date line')
+ d = match.group(1)
+ if d[2] == '/':
+ # Y2K
+ d = '19' + d
+
+ if len(d.split()) != 3:
+ # cvs log dates always in GMT
+ d = d + ' UTC'
+ e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
+ '%Y/%m/%d %H:%M:%S',
+ '%Y-%m-%d %H:%M:%S'])
+ e.author = scache(match.group(2))
+ e.dead = match.group(3).lower() == 'dead'
+
+ if match.group(5):
+ if match.group(6):
+ e.lines = (int(match.group(5)), int(match.group(6)))
+ else:
+ e.lines = (int(match.group(5)), 0)
+ elif match.group(6):
+ e.lines = (0, int(match.group(6)))
+ else:
+ e.lines = None
+
+ if match.group(7): # cvsnt mergepoint
+ myrev = match.group(8).split('.')
+ if len(myrev) == 2: # head
+ e.mergepoint = 'HEAD'
+ else:
+ myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
+ branches = [b for b in branchmap if branchmap[b] == myrev]
+                    assert len(branches) == 1, ('unknown branch: %s'
+                                                % match.group(8))
+ e.mergepoint = branches[0]
+ else:
+ e.mergepoint = None
+ e.comment = []
+ state = 7
+
+ elif state == 7:
+ # read the revision numbers of branches that start at this revision
+ # or store the commit log message otherwise
+ m = re_70.match(line)
+ if m:
+ e.branches = [tuple([int(y) for y in x.strip().split('.')])
+ for x in m.group(1).split(';')]
+ state = 8
+ elif re_31.match(line) and re_50.match(peek):
+ state = 5
+ store = True
+ elif re_32.match(line):
+ state = 0
+ store = True
+ else:
+ e.comment.append(line)
+
+ elif state == 8:
+ # store commit log message
+ if re_31.match(line):
+ cpeek = peek
+ if cpeek.endswith('\n'):
+ cpeek = cpeek[:-1]
+ if re_50.match(cpeek):
+ state = 5
+ store = True
+ else:
+ e.comment.append(line)
+ elif re_32.match(line):
+ state = 0
+ store = True
+ else:
+ e.comment.append(line)
+
+ # When a file is added on a branch B1, CVS creates a synthetic
+ # dead trunk revision 1.1 so that the branch has a root.
+ # Likewise, if you merge such a file to a later branch B2 (one
+ # that already existed when the file was added on B1), CVS
+ # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
+ # these revisions now, but mark them synthetic so
+ # createchangeset() can take care of them.
+ if (store and
+ e.dead and
+ e.revision[-1] == 1 and # 1.1 or 1.1.x.1
+ len(e.comment) == 1 and
+ file_added_re.match(e.comment[0])):
+ ui.debug('found synthetic revision in %s: %r\n'
+ % (e.rcs, e.comment[0]))
+ e.synthetic = True
+
+ if store:
+ # clean up the results and save in the log.
+ store = False
+ e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
+ e.comment = scache('\n'.join(e.comment))
+
+ revn = len(e.revision)
+ if revn > 3 and (revn % 2) == 0:
+ e.branch = tags.get(e.revision[:-1], [None])[0]
+ else:
+ e.branch = None
+
+ # find the branches starting from this revision
+ branchpoints = set()
+ for branch, revision in branchmap.iteritems():
+ revparts = tuple([int(i) for i in revision.split('.')])
+ if len(revparts) < 2: # bad tags
+ continue
+ if revparts[-2] == 0 and revparts[-1] % 2 == 0:
+ # normal branch
+ if revparts[:-2] == e.revision:
+ branchpoints.add(branch)
+ elif revparts == (1, 1, 1): # vendor branch
+ if revparts in e.branches:
+ branchpoints.add(branch)
+ e.branchpoints = branchpoints
+
+ log.append(e)
+
+ if len(log) % 100 == 0:
+ ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
+
+ log.sort(key=lambda x: (x.rcs, x.revision))
+
+ # find parent revisions of individual files
+ versions = {}
+ for e in log:
+ branch = e.revision[:-1]
+ p = versions.get((e.rcs, branch), None)
+ if p is None:
+ p = e.revision[:-2]
+ e.parent = p
+ versions[(e.rcs, branch)] = e.revision
+
+ # update the log cache
+ if cache:
+ if log:
+ # join up the old and new logs
+ log.sort(key=lambda x: x.date)
+
+ if oldlog and oldlog[-1].date >= log[0].date:
+ raise logerror(_('log cache overlaps with new log entries,'
+ ' re-run without cache.'))
+
+ log = oldlog + log
+
+ # write the new cachefile
+ ui.note(_('writing cvs log cache %s\n') % cachefile)
+ pickle.dump(log, open(cachefile, 'w'))
+ else:
+ log = oldlog
+
+ ui.status(_('%d log entries\n') % len(log))
+
+ hook.hook(ui, None, "cvslog", True, log=log)
+
+ return log
+
+
+class changeset(object):
+ '''Class changeset has the following attributes:
+ .id - integer identifying this changeset (list index)
+ .author - author name as CVS knows it
+ .branch - name of branch this changeset is on, or None
+ .comment - commit message
+ .date - the commit date as a (time,tz) tuple
+ .entries - list of logentry objects in this changeset
+ .parents - list of one or two parent changesets
+ .tags - list of tags on this changeset
+ .synthetic - from synthetic revision "file ... added on branch ..."
+    .mergepoint - the branch that has been merged from
+                  (if present in rlog output)
+    .branchpoints - the branches that start at the current entry
+ '''
+ def __init__(self, **entries):
+ self.synthetic = False
+ self.__dict__.update(entries)
+
+ def __repr__(self):
+ return "<%s at 0x%x: %s>" % (self.__class__.__name__,
+ id(self),
+ getattr(self, 'id', "(no id)"))
+
+def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
+ '''Convert log into changesets.'''
+
+ ui.status(_('creating changesets\n'))
+
+ # Merge changesets
+
+ log.sort(key=lambda x: (x.comment, x.author, x.branch, x.date))
+
+ changesets = []
+ files = set()
+ c = None
+ for i, e in enumerate(log):
+
+ # Check if log entry belongs to the current changeset or not.
+
+ # Since CVS is file centric, two different file revisions with
+ # different branchpoints should be treated as belonging to two
+ # different changesets (and the ordering is important and not
+ # honoured by cvsps at this point).
+ #
+ # Consider the following case:
+ # foo 1.1 branchpoints: [MYBRANCH]
+ # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
+ #
+ # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
+ # later version of foo may be in MYBRANCH2, so foo should be the
+ # first changeset and bar the next and MYBRANCH and MYBRANCH2
+ # should both start off of the bar changeset. No provisions are
+ # made to ensure that this is, in fact, what happens.
+ if not (c and
+ e.comment == c.comment and
+ e.author == c.author and
+ e.branch == c.branch and
+ (not util.safehasattr(e, 'branchpoints') or
+                 not util.safehasattr(c, 'branchpoints') or
+ e.branchpoints == c.branchpoints) and
+ ((c.date[0] + c.date[1]) <=
+ (e.date[0] + e.date[1]) <=
+ (c.date[0] + c.date[1]) + fuzz) and
+ e.file not in files):
+ c = changeset(comment=e.comment, author=e.author,
+ branch=e.branch, date=e.date, entries=[],
+ mergepoint=getattr(e, 'mergepoint', None),
+ branchpoints=getattr(e, 'branchpoints', set()))
+ changesets.append(c)
+ files = set()
+ if len(changesets) % 100 == 0:
+ t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
+ ui.status(util.ellipsis(t, 80) + '\n')
+
+ c.entries.append(e)
+ files.add(e.file)
+ c.date = e.date # changeset date is date of latest commit in it
+
+ # Mark synthetic changesets
+
+ for c in changesets:
+ # Synthetic revisions always get their own changeset, because
+ # the log message includes the filename. E.g. if you add file3
+ # and file4 on a branch, you get four log entries and three
+ # changesets:
+ # "File file3 was added on branch ..." (synthetic, 1 entry)
+ # "File file4 was added on branch ..." (synthetic, 1 entry)
+ # "Add file3 and file4 to fix ..." (real, 2 entries)
+ # Hence the check for 1 entry here.
+ c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
+
+ # Sort files in each changeset
+
+ def entitycompare(l, r):
+ 'Mimic cvsps sorting order'
+ l = l.file.split('/')
+ r = r.file.split('/')
+ nl = len(l)
+ nr = len(r)
+ n = min(nl, nr)
+ for i in range(n):
+ if i + 1 == nl and nl < nr:
+ return -1
+ elif i + 1 == nr and nl > nr:
+ return +1
+ elif l[i] < r[i]:
+ return -1
+ elif l[i] > r[i]:
+ return +1
+ return 0
+
+ for c in changesets:
+ c.entries.sort(entitycompare)
+
+ # Sort changesets by date
+
+ def cscmp(l, r):
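+        # changesets with an earlier date sort first; for equal dates,
+        # a changeset containing the parent revision of a file in the
+        # other changeset must sort before it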
+ d = sum(l.date) - sum(r.date)
+ if d:
+ return d
+
+ # detect vendor branches and initial commits on a branch
+ le = {}
+ for e in l.entries:
+ le[e.rcs] = e.revision
+ re = {}
+ for e in r.entries:
+ re[e.rcs] = e.revision
+
+ d = 0
+ for e in l.entries:
+ if re.get(e.rcs, None) == e.parent:
+ assert not d
+ d = 1
+ break
+
+ for e in r.entries:
+ if le.get(e.rcs, None) == e.parent:
+ assert not d
+ d = -1
+ break
+
+ return d
+
+ changesets.sort(cscmp)
+
+ # Collect tags
+
+ globaltags = {}
+ for c in changesets:
+ for e in c.entries:
+ for tag in e.tags:
+ # remember which is the latest changeset to have this tag
+ globaltags[tag] = c
+
+ for c in changesets:
+ tags = set()
+ for e in c.entries:
+ tags.update(e.tags)
+ # remember tags only if this is the latest changeset to have it
+ c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
+
+ # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
+ # by inserting dummy changesets with two parents, and handle
+ # {{mergefrombranch BRANCHNAME}} by setting two parents.
+
+ if mergeto is None:
+ mergeto = r'{{mergetobranch ([-\w]+)}}'
+ if mergeto:
+ mergeto = re.compile(mergeto)
+
+ if mergefrom is None:
+ mergefrom = r'{{mergefrombranch ([-\w]+)}}'
+ if mergefrom:
+ mergefrom = re.compile(mergefrom)
+
+ versions = {} # changeset index where we saw any particular file version
+ branches = {} # changeset index where we saw a branch
+ n = len(changesets)
+ i = 0
+ while i < n:
+ c = changesets[i]
+
+ for f in c.entries:
+ versions[(f.rcs, f.revision)] = i
+
+ p = None
+ if c.branch in branches:
+ p = branches[c.branch]
+ else:
+ # first changeset on a new branch
+ # the parent is a changeset with the branch in its
+ # branchpoints such that it is the latest possible
+ # commit without any intervening, unrelated commits.
+
+ for candidate in xrange(i):
+ if c.branch not in changesets[candidate].branchpoints:
+ if p is not None:
+ break
+ continue
+ p = candidate
+
+ c.parents = []
+ if p is not None:
+ p = changesets[p]
+
+ # Ensure no changeset has a synthetic changeset as a parent.
+ while p.synthetic:
+ assert len(p.parents) <= 1, \
+ _('synthetic changeset cannot have multiple parents')
+ if p.parents:
+ p = p.parents[0]
+ else:
+ p = None
+ break
+
+ if p is not None:
+ c.parents.append(p)
+
+ if c.mergepoint:
+ if c.mergepoint == 'HEAD':
+ c.mergepoint = None
+ c.parents.append(changesets[branches[c.mergepoint]])
+
+ if mergefrom:
+ m = mergefrom.search(c.comment)
+ if m:
+ m = m.group(1)
+ if m == 'HEAD':
+ m = None
+ try:
+ candidate = changesets[branches[m]]
+ except KeyError:
+ ui.warn(_("warning: CVS commit message references "
+ "non-existent branch %r:\n%s\n")
+ % (m, c.comment))
+ if m in branches and c.branch != m and not candidate.synthetic:
+ c.parents.append(candidate)
+
+ if mergeto:
+ m = mergeto.search(c.comment)
+ if m:
+ if m.groups():
+ m = m.group(1)
+ if m == 'HEAD':
+ m = None
+ else:
+ m = None # if no group found then merge to HEAD
+ if m in branches and c.branch != m:
+ # insert empty changeset for merge
+ cc = changeset(
+ author=c.author, branch=m, date=c.date,
+ comment='convert-repo: CVS merge from branch %s'
+ % c.branch,
+ entries=[], tags=[],
+ parents=[changesets[branches[m]], c])
+ changesets.insert(i + 1, cc)
+ branches[m] = i + 1
+
+ # adjust our loop counters now we have inserted a new entry
+ n += 1
+ i += 2
+ continue
+
+ branches[c.branch] = i
+ i += 1
+
+ # Drop synthetic changesets (safe now that we have ensured no other
+ # changesets can have them as parents).
+ i = 0
+ while i < len(changesets):
+ if changesets[i].synthetic:
+ del changesets[i]
+ else:
+ i += 1
+
+ # Number changesets
+
+ for i, c in enumerate(changesets):
+ c.id = i + 1
+
+ ui.status(_('%d changeset entries\n') % len(changesets))
+
+ hook.hook(ui, None, "cvschangesets", True, changesets=changesets)
+
+ return changesets
+
+
+def debugcvsps(ui, *args, **opts):
+ '''Read CVS rlog for current directory or named path in
+ repository, and convert the log to changesets based on matching
+ commit log entries and dates.
+ '''
+ if opts["new_cache"]:
+ cache = "write"
+ elif opts["update_cache"]:
+ cache = "update"
+ else:
+ cache = None
+
+ revisions = opts["revisions"]
+
+ try:
+ if args:
+ log = []
+ for d in args:
+ log += createlog(ui, d, root=opts["root"], cache=cache)
+ else:
+ log = createlog(ui, root=opts["root"], cache=cache)
+ except logerror, e:
+ ui.write("%r\n"%e)
+ return
+
+ changesets = createchangeset(ui, log, opts["fuzz"])
+ del log
+
+ # Print changesets (optionally filtered)
+
+ off = len(revisions)
+ branches = {} # latest version number in each branch
+ ancestors = {} # parent branch
+ for cs in changesets:
+
+ if opts["ancestors"]:
+ if cs.branch not in branches and cs.parents and cs.parents[0].id:
+ ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
+ cs.parents[0].id)
+ branches[cs.branch] = cs.id
+
+ # limit by branches
+ if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
+ continue
+
+ if not off:
+ # Note: trailing spaces on several lines here are needed to have
+ # bug-for-bug compatibility with cvsps.
+ ui.write('---------------------\n')
+ ui.write('PatchSet %d \n' % cs.id)
+ ui.write('Date: %s\n' % util.datestr(cs.date,
+ '%Y/%m/%d %H:%M:%S %1%2'))
+ ui.write('Author: %s\n' % cs.author)
+ ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
+ ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
+ ','.join(cs.tags) or '(none)'))
+ branchpoints = getattr(cs, 'branchpoints', None)
+ if branchpoints:
+ ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
+ if opts["parents"] and cs.parents:
+ if len(cs.parents) > 1:
+ ui.write('Parents: %s\n' %
+ (','.join([str(p.id) for p in cs.parents])))
+ else:
+ ui.write('Parent: %d\n' % cs.parents[0].id)
+
+ if opts["ancestors"]:
+ b = cs.branch
+ r = []
+ while b:
+ b, c = ancestors[b]
+ r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
+ if r:
+ ui.write('Ancestors: %s\n' % (','.join(r)))
+
+ ui.write('Log:\n')
+ ui.write('%s\n\n' % cs.comment)
+ ui.write('Members: \n')
+ for f in cs.entries:
+ fn = f.file
+ if fn.startswith(opts["prefix"]):
+ fn = fn[len(opts["prefix"]):]
+ ui.write('\t%s:%s->%s%s \n' % (
+ fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
+ '.'.join([str(x) for x in f.revision]),
+ ['', '(DEAD)'][f.dead]))
+ ui.write('\n')
+
+ # have we seen the start tag?
+ if revisions and off:
+ if revisions[0] == str(cs.id) or \
+ revisions[0] in cs.tags:
+ off = False
+
+ # see if we reached the end tag
+ if len(revisions) > 1 and not off:
+ if revisions[1] == str(cs.id) or \
+ revisions[1] in cs.tags:
+ break
diff --git a/hgext/convert/darcs.py b/hgext/convert/darcs.py
new file mode 100644
index 0000000..b10a533
--- /dev/null
+++ b/hgext/convert/darcs.py
@@ -0,0 +1,200 @@
+# darcs.py - darcs support for the convert extension
+#
+# Copyright 2007-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from common import NoRepo, checktool, commandline, commit, converter_source
+from mercurial.i18n import _
+from mercurial import util
+import os, shutil, tempfile, re
+
+# The naming drift of ElementTree is fun!
+
+try:
+ from xml.etree.cElementTree import ElementTree, XMLParser
+except ImportError:
+ try:
+ from xml.etree.ElementTree import ElementTree, XMLParser
+ except ImportError:
+ try:
+ from elementtree.cElementTree import ElementTree, XMLParser
+ except ImportError:
+ try:
+ from elementtree.ElementTree import ElementTree, XMLParser
+ except ImportError:
+ pass
+
+class darcs_source(converter_source, commandline):
+ def __init__(self, ui, path, rev=None):
+ converter_source.__init__(self, ui, path, rev=rev)
+ commandline.__init__(self, ui, 'darcs')
+
+ # check for _darcs, ElementTree so that we can easily skip
+ # test-convert-darcs if ElementTree is not around
+ if not os.path.exists(os.path.join(path, '_darcs')):
+ raise NoRepo(_("%s does not look like a darcs repository") % path)
+
+ checktool('darcs')
+ version = self.run0('--version').splitlines()[0].strip()
+ if version < '2.1':
+ raise util.Abort(_('darcs version 2.1 or newer needed (found %r)') %
+ version)
+
+ if "ElementTree" not in globals():
+ raise util.Abort(_("Python ElementTree module is not available"))
+
+ self.path = os.path.realpath(path)
+
+ self.lastrev = None
+ self.changes = {}
+ self.parents = {}
+ self.tags = {}
+
+ # Check darcs repository format
+ format = self.format()
+ if format:
+ if format in ('darcs-1.0', 'hashed'):
+ raise NoRepo(_("%s repository format is unsupported, "
+ "please upgrade") % format)
+ else:
+ self.ui.warn(_('failed to detect repository format!'))
+
+ def before(self):
+ self.tmppath = tempfile.mkdtemp(
+ prefix='convert-' + os.path.basename(self.path) + '-')
+ output, status = self.run('init', repodir=self.tmppath)
+ self.checkexit(status)
+
+ tree = self.xml('changes', xml_output=True, summary=True,
+ repodir=self.path)
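+        # darcs lists patches newest first, so each patch becomes the
+        # parent of the previously seen (child) patch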
+ tagname = None
+ child = None
+ for elt in tree.findall('patch'):
+ node = elt.get('hash')
+ name = elt.findtext('name', '')
+ if name.startswith('TAG '):
+ tagname = name[4:].strip()
+ elif tagname is not None:
+ self.tags[tagname] = node
+ tagname = None
+ self.changes[node] = elt
+ self.parents[child] = [node]
+ child = node
+ self.parents[child] = []
+
+ def after(self):
+ self.ui.debug('cleaning up %s\n' % self.tmppath)
+ shutil.rmtree(self.tmppath, ignore_errors=True)
+
+ def recode(self, s, encoding=None):
+ if isinstance(s, unicode):
+ # XMLParser returns unicode objects for anything it can't
+ # encode into ASCII. We convert them back to str to get
+ # recode's normal conversion behavior.
+ s = s.encode('latin-1')
+ return super(darcs_source, self).recode(s, encoding)
+
+ def xml(self, cmd, **kwargs):
+ # NOTE: darcs is currently encoding agnostic and will print
+ # patch metadata byte-for-byte, even in the XML changelog.
+ etree = ElementTree()
+ # While we are decoding the XML as latin-1 to be as liberal as
+ # possible, etree will still raise an exception if any
+ # non-printable characters are in the XML changelog.
+ parser = XMLParser(encoding='latin-1')
+ fp = self._run(cmd, **kwargs)
+ etree.parse(fp, parser=parser)
+ self.checkexit(fp.close())
+ return etree.getroot()
+
+ def format(self):
+ output, status = self.run('show', 'repo', no_files=True,
+ repodir=self.path)
+ self.checkexit(status)
+ m = re.search(r'^\s*Format:\s*(.*)$', output, re.MULTILINE)
+ if not m:
+ return None
+ return ','.join(sorted(f.strip() for f in m.group(1).split(',')))
+
+ def manifest(self):
+ man = []
+ output, status = self.run('show', 'files', no_directories=True,
+ repodir=self.tmppath)
+ self.checkexit(status)
+ for line in output.split('\n'):
+ path = line[2:]
+ if path:
+ man.append(path)
+ return man
+
+ def getheads(self):
+ return self.parents[None]
+
+ def getcommit(self, rev):
+ elt = self.changes[rev]
+ date = util.strdate(elt.get('local_date'), '%a %b %d %H:%M:%S %Z %Y')
+ desc = elt.findtext('name') + '\n' + elt.findtext('comment', '')
+ # etree can return unicode objects for name, comment, and author,
+ # so recode() is used to ensure str objects are emitted.
+ return commit(author=self.recode(elt.get('author')),
+ date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
+ desc=self.recode(desc).strip(),
+ parents=self.parents[rev])
+
+ def pull(self, rev):
+ output, status = self.run('pull', self.path, all=True,
+ match='hash %s' % rev,
+ no_test=True, no_posthook=True,
+ external_merge='/bin/false',
+ repodir=self.tmppath)
+ if status:
+ if output.find('We have conflicts in') == -1:
+ self.checkexit(status, output)
+ output, status = self.run('revert', all=True, repodir=self.tmppath)
+ self.checkexit(status, output)
+
+ def getchanges(self, rev):
+ copies = {}
+ changes = []
+ man = None
+ for elt in self.changes[rev].find('summary').getchildren():
+ if elt.tag in ('add_directory', 'remove_directory'):
+ continue
+ if elt.tag == 'move':
+ if man is None:
+ man = self.manifest()
+ source, dest = elt.get('from'), elt.get('to')
+ if source in man:
+ # File move
+ changes.append((source, rev))
+ changes.append((dest, rev))
+ copies[dest] = source
+ else:
+ # Directory move, deduce file moves from manifest
+ source = source + '/'
+ for f in man:
+ if not f.startswith(source):
+ continue
+ fdest = dest + '/' + f[len(source):]
+ changes.append((f, rev))
+ changes.append((fdest, rev))
+ copies[fdest] = f
+ else:
+ changes.append((elt.text.strip(), rev))
+ self.pull(rev)
+ self.lastrev = rev
+ return sorted(changes), copies
+
+ def getfile(self, name, rev):
+ if rev != self.lastrev:
+ raise util.Abort(_('internal calling inconsistency'))
+ path = os.path.join(self.tmppath, name)
+ data = util.readfile(path)
+ mode = os.lstat(path).st_mode
+ mode = (mode & 0111) and 'x' or ''
+ return data, mode
+
+ def gettags(self):
+ return self.tags
diff --git a/hgext/convert/filemap.py b/hgext/convert/filemap.py
new file mode 100644
index 0000000..c14df16
--- /dev/null
+++ b/hgext/convert/filemap.py
@@ -0,0 +1,391 @@
+# Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
+# Copyright 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import shlex
+from mercurial.i18n import _
+from mercurial import util
+from common import SKIPREV, converter_source
+
+def rpairs(name):
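+    # generate (prefix, suffix) splits of name at each '/' from the
+    # right, e.g. 'a/b/c' -> ('a/b/c', ''), ('a/b', 'c'), ('a', 'b/c'),
+    # and finally ('.', 'a/b/c')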
+ e = len(name)
+ while e != -1:
+ yield name[:e], name[e + 1:]
+ e = name.rfind('/', 0, e)
+ yield '.', name
+
+class filemapper(object):
+ '''Map and filter filenames when importing.
+ A name can be mapped to itself, a new name, or None (omit from new
+ repository).'''
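+    #
+    # A filemap is a text file with one directive per line, e.g.:
+    #   include subdir
+    #   exclude subdir/ignored
+    #   rename subdir .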
+
+ def __init__(self, ui, path=None):
+ self.ui = ui
+ self.include = {}
+ self.exclude = {}
+ self.rename = {}
+ if path:
+ if self.parse(path):
+ raise util.Abort(_('errors in filemap'))
+
+ def parse(self, path):
+ errs = 0
+ def check(name, mapping, listname):
+ if not name:
+ self.ui.warn(_('%s:%d: path to %s is missing\n') %
+ (lex.infile, lex.lineno, listname))
+ return 1
+ if name in mapping:
+ self.ui.warn(_('%s:%d: %r already in %s list\n') %
+ (lex.infile, lex.lineno, name, listname))
+ return 1
+ if (name.startswith('/') or
+ name.endswith('/') or
+ '//' in name):
+ self.ui.warn(_('%s:%d: superfluous / in %s %r\n') %
+ (lex.infile, lex.lineno, listname, name))
+ return 1
+ return 0
+ lex = shlex.shlex(open(path), path, True)
+ lex.wordchars += '!@#$%^&*()-=+[]{}|;:,./<>?'
+ cmd = lex.get_token()
+ while cmd:
+ if cmd == 'include':
+ name = lex.get_token()
+ errs += check(name, self.exclude, 'exclude')
+ self.include[name] = name
+ elif cmd == 'exclude':
+ name = lex.get_token()
+ errs += check(name, self.include, 'include')
+ errs += check(name, self.rename, 'rename')
+ self.exclude[name] = name
+ elif cmd == 'rename':
+ src = lex.get_token()
+ dest = lex.get_token()
+ errs += check(src, self.exclude, 'exclude')
+ self.rename[src] = dest
+ elif cmd == 'source':
+ errs += self.parse(lex.get_token())
+ else:
+ self.ui.warn(_('%s:%d: unknown directive %r\n') %
+ (lex.infile, lex.lineno, cmd))
+ errs += 1
+ cmd = lex.get_token()
+ return errs
+
+ def lookup(self, name, mapping):
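+        # find the longest prefix of name (split at '/') present in
+        # mapping; return (mapped value, matched prefix, suffix)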
+ for pre, suf in rpairs(name):
+ try:
+ return mapping[pre], pre, suf
+ except KeyError:
+ pass
+ return '', name, ''
+
+ def __call__(self, name):
+ if self.include:
+ inc = self.lookup(name, self.include)[0]
+ else:
+ inc = name
+ if self.exclude:
+ exc = self.lookup(name, self.exclude)[0]
+ else:
+ exc = ''
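+        # the longer (more specific) match wins; exclusion wins ties,
+        # and a bare exclude match with no include list omits the file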
+ if (not self.include and exc) or (len(inc) <= len(exc)):
+ return None
+ newpre, pre, suf = self.lookup(name, self.rename)
+ if newpre:
+ if newpre == '.':
+ return suf
+ if suf:
+ if newpre.endswith('/'):
+ return newpre + suf
+ return newpre + '/' + suf
+ return newpre
+ return name
+
+ def active(self):
+ return bool(self.include or self.exclude or self.rename)
+
+# This class does two additional things compared to a regular source:
+#
+# - Filter and rename files. This is mostly wrapped by the filemapper
+# class above. We hide the original filename in the revision that is
+# returned by getchanges to be able to find things later in getfile.
+#
+# - Return only revisions that matter for the files we're interested in.
+# This involves rewriting the parents of the original revision to
+# create a graph that is restricted to those revisions.
+#
+# This set of revisions includes not only revisions that directly
+# touch files we're interested in, but also merges that merge two
+# or more interesting revisions.
+
+class filemap_source(converter_source):
+ def __init__(self, ui, baseconverter, filemap):
+ super(filemap_source, self).__init__(ui)
+ self.base = baseconverter
+ self.filemapper = filemapper(ui, filemap)
+ self.commits = {}
+ # if a revision rev has parent p in the original revision graph, then
+ # rev will have parent self.parentmap[p] in the restricted graph.
+ self.parentmap = {}
+ # self.wantedancestors[rev] is the set of all ancestors of rev that
+ # are in the restricted graph.
+ self.wantedancestors = {}
+ self.convertedorder = None
+ self._rebuilt = False
+ self.origparents = {}
+ self.children = {}
+ self.seenchildren = {}
+
+ def before(self):
+ self.base.before()
+
+ def after(self):
+ self.base.after()
+
+ def setrevmap(self, revmap):
+ # rebuild our state to make things restartable
+ #
+ # To avoid calling getcommit for every revision that has already
+ # been converted, we rebuild only the parentmap, delaying the
+ # rebuild of wantedancestors until we need it (i.e. until a
+ # merge).
+ #
+ # We assume the order argument lists the revisions in
+ # topological order, so that we can infer which revisions were
+ # wanted by previous runs.
+ self._rebuilt = not revmap
+ seen = {SKIPREV: SKIPREV}
+ dummyset = set()
+ converted = []
+ for rev in revmap.order:
+ mapped = revmap[rev]
+ wanted = mapped not in seen
+ if wanted:
+ seen[mapped] = rev
+ self.parentmap[rev] = rev
+ else:
+ self.parentmap[rev] = seen[mapped]
+ self.wantedancestors[rev] = dummyset
+ arg = seen[mapped]
+ if arg == SKIPREV:
+ arg = None
+ converted.append((rev, wanted, arg))
+ self.convertedorder = converted
+ return self.base.setrevmap(revmap)
+
+ def rebuild(self):
+ if self._rebuilt:
+ return True
+ self._rebuilt = True
+ self.parentmap.clear()
+ self.wantedancestors.clear()
+ self.seenchildren.clear()
+ for rev, wanted, arg in self.convertedorder:
+ if rev not in self.origparents:
+ self.origparents[rev] = self.getcommit(rev).parents
+ if arg is not None:
+ self.children[arg] = self.children.get(arg, 0) + 1
+
+ for rev, wanted, arg in self.convertedorder:
+ parents = self.origparents[rev]
+ if wanted:
+ self.mark_wanted(rev, parents)
+ else:
+ self.mark_not_wanted(rev, arg)
+ self._discard(arg, *parents)
+
+ return True
+
+ def getheads(self):
+ return self.base.getheads()
+
+ def getcommit(self, rev):
+ # We want to save a reference to the commit objects to be able
+ # to rewrite their parents later on.
+ c = self.commits[rev] = self.base.getcommit(rev)
+ for p in c.parents:
+ self.children[p] = self.children.get(p, 0) + 1
+ return c
+
+ def _cachedcommit(self, rev):
+ if rev in self.commits:
+ return self.commits[rev]
+ return self.base.getcommit(rev)
+
+ def _discard(self, *revs):
+ for r in revs:
+ if r is None:
+ continue
+ self.seenchildren[r] = self.seenchildren.get(r, 0) + 1
+ if self.seenchildren[r] == self.children[r]:
+ del self.wantedancestors[r]
+ del self.parentmap[r]
+ del self.seenchildren[r]
+ if self._rebuilt:
+ del self.children[r]
+
+ def wanted(self, rev, i):
+ # Return True if we're directly interested in rev.
+ #
+ # i is an index selecting one of the parents of rev (if rev
+ # has no parents, i is None). getchangedfiles will give us
+ # the list of files that are different in rev and in the parent
+ # indicated by i. If we're interested in any of these files,
+ # we're interested in rev.
+ try:
+ files = self.base.getchangedfiles(rev, i)
+ except NotImplementedError:
+ raise util.Abort(_("source repository doesn't support --filemap"))
+ for f in files:
+ if self.filemapper(f):
+ return True
+ return False
+
+ def mark_not_wanted(self, rev, p):
+ # Mark rev as not interesting and update data structures.
+
+ if p is None:
+ # A root revision. Use SKIPREV to indicate that it doesn't
+ # map to any revision in the restricted graph. Put SKIPREV
+ # in the set of wanted ancestors to simplify code elsewhere
+ self.parentmap[rev] = SKIPREV
+ self.wantedancestors[rev] = set((SKIPREV,))
+ return
+
+ # Reuse the data from our parent.
+ self.parentmap[rev] = self.parentmap[p]
+ self.wantedancestors[rev] = self.wantedancestors[p]
+
+ def mark_wanted(self, rev, parents):
+        # Mark rev as wanted and update data structures.
+
+ # rev will be in the restricted graph, so children of rev in
+ # the original graph should still have rev as a parent in the
+ # restricted graph.
+ self.parentmap[rev] = rev
+
+ # The set of wanted ancestors of rev is the union of the sets
+ # of wanted ancestors of its parents. Plus rev itself.
+ wrev = set()
+ for p in parents:
+ wrev.update(self.wantedancestors[p])
+ wrev.add(rev)
+ self.wantedancestors[rev] = wrev
+
+ def getchanges(self, rev):
+ parents = self.commits[rev].parents
+ if len(parents) > 1:
+ self.rebuild()
+
+ # To decide whether we're interested in rev we:
+ #
+ # - calculate what parents rev will have if it turns out we're
+ # interested in it. If it's going to have more than 1 parent,
+ # we're interested in it.
+ #
+ # - otherwise, we'll compare it with the single parent we found.
+        #   If any of the files we're interested in is different in
+        #   the two revisions, we're interested in rev.
+
+ # A parent p is interesting if its mapped version (self.parentmap[p]):
+ # - is not SKIPREV
+ # - is still not in the list of parents (we don't want duplicates)
+        # - is not an ancestor of the mapped versions of the other parents, or
+        #   there is no parent on the same branch as the current revision.
+ mparents = []
+ knownparents = set()
+ branch = self.commits[rev].branch
+ hasbranchparent = False
+ for i, p1 in enumerate(parents):
+ mp1 = self.parentmap[p1]
+ if mp1 == SKIPREV or mp1 in knownparents:
+ continue
+ isancestor = util.any(p2 for p2 in parents
+ if p1 != p2 and mp1 != self.parentmap[p2]
+ and mp1 in self.wantedancestors[p2])
+ if not isancestor and not hasbranchparent and len(parents) > 1:
+ # This could be expensive, avoid unnecessary calls.
+ if self._cachedcommit(p1).branch == branch:
+ hasbranchparent = True
+ mparents.append((p1, mp1, i, isancestor))
+ knownparents.add(mp1)
+        # Discard parents that are ancestors of other parents if there
+        # is a non-ancestor one on the same branch as the current revision.
+ if hasbranchparent:
+ mparents = [p for p in mparents if not p[3]]
+ wp = None
+ if mparents:
+ wp = max(p[2] for p in mparents)
+ mparents = [p[1] for p in mparents]
+ elif parents:
+ wp = 0
+
+ self.origparents[rev] = parents
+
+ closed = False
+ if 'close' in self.commits[rev].extra:
+ # A branch closing revision is only useful if one of its
+ # parents belong to the branch being closed
+ pbranches = [self._cachedcommit(p).branch for p in mparents]
+ if branch in pbranches:
+ closed = True
+
+ if len(mparents) < 2 and not closed and not self.wanted(rev, wp):
+ # We don't want this revision.
+ # Update our state and tell the convert process to map this
+            # revision to the same revision its parent was mapped to.
+ p = None
+ if parents:
+ p = parents[wp]
+ self.mark_not_wanted(rev, p)
+ self.convertedorder.append((rev, False, p))
+ self._discard(*parents)
+ return self.parentmap[rev]
+
+ # We want this revision.
+ # Rewrite the parents of the commit object
+ self.commits[rev].parents = mparents
+ self.mark_wanted(rev, parents)
+ self.convertedorder.append((rev, True, None))
+ self._discard(*parents)
+
+ # Get the real changes and do the filtering/mapping. To be
+ # able to get the files later on in getfile, we hide the
+ # original filename in the rev part of the return value.
+ changes, copies = self.base.getchanges(rev)
+ files = {}
+ for f, r in changes:
+ newf = self.filemapper(f)
+ if newf and (newf != f or newf not in files):
+ files[newf] = (f, r)
+ files = sorted(files.items())
+
+ ncopies = {}
+ for c in copies:
+ newc = self.filemapper(c)
+ if newc:
+ newsource = self.filemapper(copies[c])
+ if newsource:
+ ncopies[newc] = newsource
+
+ return files, ncopies
+
+ def getfile(self, name, rev):
+ realname, realrev = rev
+ return self.base.getfile(realname, realrev)
+
+ def gettags(self):
+ return self.base.gettags()
+
+ def hasnativeorder(self):
+ return self.base.hasnativeorder()
+
+ def lookuprev(self, rev):
+ return self.base.lookuprev(rev)
+
+ def getbookmarks(self):
+ return self.base.getbookmarks()
diff --git a/hgext/convert/git.py b/hgext/convert/git.py
new file mode 100644
index 0000000..8058399
--- /dev/null
+++ b/hgext/convert/git.py
@@ -0,0 +1,217 @@
+# git.py - git support for the convert extension
+#
+# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os
+from mercurial import util
+from mercurial.node import hex, nullid
+from mercurial.i18n import _
+
+from common import NoRepo, commit, converter_source, checktool
+
+class convert_git(converter_source):
+    # Windows does not support the GIT_DIR= construct while other
+    # systems cannot remove an environment variable. Just assume
+    # no platform has both issues.
+ if util.safehasattr(os, 'unsetenv'):
+ def gitopen(self, s, noerr=False):
+ prevgitdir = os.environ.get('GIT_DIR')
+ os.environ['GIT_DIR'] = self.path
+ try:
+ if noerr:
+ (stdin, stdout, stderr) = util.popen3(s)
+ return stdout
+ else:
+ return util.popen(s, 'rb')
+ finally:
+ if prevgitdir is None:
+ del os.environ['GIT_DIR']
+ else:
+ os.environ['GIT_DIR'] = prevgitdir
+ else:
+ def gitopen(self, s, noerr=False):
+ if noerr:
+ (sin, so, se) = util.popen3('GIT_DIR=%s %s' % (self.path, s))
+ return so
+ else:
+ return util.popen('GIT_DIR=%s %s' % (self.path, s), 'rb')
+
+ def gitread(self, s):
+ fh = self.gitopen(s)
+ data = fh.read()
+ return data, fh.close()
+
+ def __init__(self, ui, path, rev=None):
+ super(convert_git, self).__init__(ui, path, rev=rev)
+
+ if os.path.isdir(path + "/.git"):
+ path += "/.git"
+ if not os.path.exists(path + "/objects"):
+ raise NoRepo(_("%s does not look like a Git repository") % path)
+
+ checktool('git', 'git')
+
+ self.path = path
+
+ def getheads(self):
+ if not self.rev:
+ heads, ret = self.gitread('git rev-parse --branches --remotes')
+ heads = heads.splitlines()
+ else:
+ heads, ret = self.gitread("git rev-parse --verify %s" % self.rev)
+ heads = [heads[:-1]]
+ if ret:
+ raise util.Abort(_('cannot retrieve git heads'))
+ return heads
+
+ def catfile(self, rev, type):
+ if rev == hex(nullid):
+ raise IOError
+ data, ret = self.gitread("git cat-file %s %s" % (type, rev))
+ if ret:
+ raise util.Abort(_('cannot read %r object at %s') % (type, rev))
+ return data
+
+ def getfile(self, name, rev):
+ data = self.catfile(rev, "blob")
+ mode = self.modecache[(name, rev)]
+ return data, mode
+
+ def getchanges(self, version):
+ self.modecache = {}
+ fh = self.gitopen("git diff-tree -z --root -m -r %s" % version)
+ changes = []
+ seen = set()
+ entry = None
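+        # diff-tree -z output alternates NUL-separated ':'-prefixed
+        # status entries with the paths they apply to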
+ for l in fh.read().split('\x00'):
+ if not entry:
+ if not l.startswith(':'):
+ continue
+ entry = l
+ continue
+ f = l
+ if f not in seen:
+ seen.add(f)
+ entry = entry.split()
+ h = entry[3]
+ if entry[1] == '160000':
+ raise util.Abort('git submodules are not supported!')
+ p = (entry[1] == "100755")
+ s = (entry[1] == "120000")
+ self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
+ changes.append((f, h))
+ entry = None
+ if fh.close():
+ raise util.Abort(_('cannot read changes in %s') % version)
+ return (changes, {})
+
+ def getcommit(self, version):
+ c = self.catfile(version, "commit") # read the commit hash
+ end = c.find("\n\n")
+ message = c[end + 2:]
+ message = self.recode(message)
+ l = c[:end].splitlines()
+ parents = []
+ author = committer = None
+ for e in l[1:]:
+ n, v = e.split(" ", 1)
+ if n == "author":
+ p = v.split()
+ tm, tz = p[-2:]
+ author = " ".join(p[:-2])
+ if author[0] == "<": author = author[1:-1]
+ author = self.recode(author)
+ if n == "committer":
+ p = v.split()
+ tm, tz = p[-2:]
+ committer = " ".join(p[:-2])
+ if committer[0] == "<": committer = committer[1:-1]
+ committer = self.recode(committer)
+ if n == "parent":
+ parents.append(v)
+
+ if committer and committer != author:
+ message += "\ncommitter: %s\n" % committer
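+        # convert git's (+|-)HHMM timezone offset into the seconds
+        # west of UTC that Mercurial expects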
+ tzs, tzh, tzm = tz[-5:-4] + "1", tz[-4:-2], tz[-2:]
+ tz = -int(tzs) * (int(tzh) * 3600 + int(tzm))
+ date = tm + " " + str(tz)
+
+ c = commit(parents=parents, date=date, author=author, desc=message,
+ rev=version)
+ return c
+
+ def gettags(self):
+ tags = {}
+ alltags = {}
+ fh = self.gitopen('git ls-remote --tags "%s"' % self.path)
+ prefix = 'refs/tags/'
+
+ # Build complete list of tags, both annotated and bare ones
+ for line in fh:
+ line = line.strip()
+ node, tag = line.split(None, 1)
+ if not tag.startswith(prefix):
+ continue
+ alltags[tag[len(prefix):]] = node
+ if fh.close():
+ raise util.Abort(_('cannot read tags from %s') % self.path)
+
+ # Filter out tag objects for annotated tag refs
+ for tag in alltags:
+ if tag.endswith('^{}'):
+ tags[tag[:-3]] = alltags[tag]
+ else:
+ if tag + '^{}' in alltags:
+ continue
+ else:
+ tags[tag] = alltags[tag]
+
+ return tags
+
+ def getchangedfiles(self, version, i):
+ changes = []
+ if i is None:
+ fh = self.gitopen("git diff-tree --root -m -r %s" % version)
+ for l in fh:
+ if "\t" not in l:
+ continue
+ m, f = l[:-1].split("\t")
+ changes.append(f)
+ else:
+ fh = self.gitopen('git diff-tree --name-only --root -r %s '
+ '"%s^%s" --' % (version, version, i + 1))
+ changes = [f.rstrip('\n') for f in fh]
+ if fh.close():
+ raise util.Abort(_('cannot read changes in %s') % version)
+
+ return changes
+
+ def getbookmarks(self):
+ bookmarks = {}
+
+ # Interesting references in git are prefixed
+ prefix = 'refs/heads/'
+ prefixlen = len(prefix)
+
+ # factor two commands
+ gitcmd = { 'remote/': 'git ls-remote --heads origin',
+ '': 'git show-ref'}
+
+ # Origin heads
+ for reftype in gitcmd:
+ try:
+ fh = self.gitopen(gitcmd[reftype], noerr=True)
+ for line in fh:
+ line = line.strip()
+ rev, name = line.split(None, 1)
+ if not name.startswith(prefix):
+ continue
+ name = '%s%s' % (reftype, name[prefixlen:])
+ bookmarks[name] = rev
+ except Exception:
+ pass
+
+ return bookmarks
diff --git a/hgext/convert/gnuarch.py b/hgext/convert/gnuarch.py
new file mode 100644
index 0000000..de8dc43
--- /dev/null
+++ b/hgext/convert/gnuarch.py
@@ -0,0 +1,338 @@
+# gnuarch.py - GNU Arch support for the convert extension
+#
+# Copyright 2008, 2009 Aleix Conchillo Flaque <aleix@member.fsf.org>
+# and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from common import NoRepo, commandline, commit, converter_source
+from mercurial.i18n import _
+from mercurial import encoding, util
+import os, shutil, tempfile, stat
+from email.Parser import Parser
+
+class gnuarch_source(converter_source, commandline):
+
+ class gnuarch_rev(object):
+ def __init__(self, rev):
+ self.rev = rev
+ self.summary = ''
+ self.date = None
+ self.author = ''
+ self.continuationof = None
+ self.add_files = []
+ self.mod_files = []
+ self.del_files = []
+ self.ren_files = {}
+ self.ren_dirs = {}
+
+ def __init__(self, ui, path, rev=None):
+ super(gnuarch_source, self).__init__(ui, path, rev=rev)
+
+ if not os.path.exists(os.path.join(path, '{arch}')):
+ raise NoRepo(_("%s does not look like a GNU Arch repository")
+ % path)
+
+ # Could use checktool, but we want to check for baz or tla.
+ self.execmd = None
+        if util.findexe('baz'):
+            self.execmd = 'baz'
+        elif util.findexe('tla'):
+            self.execmd = 'tla'
+        else:
+            raise util.Abort(_('cannot find a GNU Arch tool'))
+
+ commandline.__init__(self, ui, self.execmd)
+
+ self.path = os.path.realpath(path)
+ self.tmppath = None
+
+ self.treeversion = None
+ self.lastrev = None
+ self.changes = {}
+ self.parents = {}
+ self.tags = {}
+ self.catlogparser = Parser()
+ self.encoding = encoding.encoding
+ self.archives = []
+
+ def before(self):
+ # Get registered archives
+ self.archives = [i.rstrip('\n')
+ for i in self.runlines0('archives', '-n')]
+
+ if self.execmd == 'tla':
+ output = self.run0('tree-version', self.path)
+ else:
+ output = self.run0('tree-version', '-d', self.path)
+ self.treeversion = output.strip()
+
+ # Get name of temporary directory
+ version = self.treeversion.split('/')
+ self.tmppath = os.path.join(tempfile.gettempdir(),
+ 'hg-%s' % version[1])
+
+ # Generate parents dictionary
+ self.parents[None] = []
+ treeversion = self.treeversion
+ child = None
+ while treeversion:
+ self.ui.status(_('analyzing tree version %s...\n') % treeversion)
+
+ archive = treeversion.split('/')[0]
+ if archive not in self.archives:
+ self.ui.status(_('tree analysis stopped because it points to '
+ 'an unregistered archive %s...\n') % archive)
+ break
+
+ # Get the complete list of revisions for that tree version
+ output, status = self.runlines('revisions', '-r', '-f', treeversion)
+            self.checkexit(status, 'failed retrieving revisions for %s'
+ % treeversion)
+
+ # No new iteration unless a revision has a continuation-of header
+ treeversion = None
+
+ for l in output:
+ rev = l.strip()
+ self.changes[rev] = self.gnuarch_rev(rev)
+ self.parents[rev] = []
+
+ # Read author, date and summary
+ catlog, status = self.run('cat-log', '-d', self.path, rev)
+ if status:
+ catlog = self.run0('cat-archive-log', rev)
+ self._parsecatlog(catlog, rev)
+
+ # Populate the parents map
+ self.parents[child].append(rev)
+
+ # Keep track of the current revision as the child of the next
+ # revision scanned
+ child = rev
+
+ # Check if we have to follow the usual incremental history
+ # or if we have to 'jump' to a different treeversion given
+ # by the continuation-of header.
+ if self.changes[rev].continuationof:
+ treeversion = '--'.join(
+ self.changes[rev].continuationof.split('--')[:-1])
+ break
+
+ # If we reached a base-0 revision w/o any continuation-of
+ # header, it means the tree history ends here.
+ if rev[-6:] == 'base-0':
+ break
+
+ def after(self):
+ self.ui.debug('cleaning up %s\n' % self.tmppath)
+ shutil.rmtree(self.tmppath, ignore_errors=True)
+
+ def getheads(self):
+ return self.parents[None]
+
+ def getfile(self, name, rev):
+ if rev != self.lastrev:
+ raise util.Abort(_('internal calling inconsistency'))
+
+ # Raise IOError if necessary (i.e. deleted files).
+ if not os.path.lexists(os.path.join(self.tmppath, name)):
+ raise IOError
+
+ return self._getfile(name, rev)
+
+ def getchanges(self, rev):
+ self._update(rev)
+ changes = []
+ copies = {}
+
+ for f in self.changes[rev].add_files:
+ changes.append((f, rev))
+
+ for f in self.changes[rev].mod_files:
+ changes.append((f, rev))
+
+ for f in self.changes[rev].del_files:
+ changes.append((f, rev))
+
+ for src in self.changes[rev].ren_files:
+ to = self.changes[rev].ren_files[src]
+ changes.append((src, rev))
+ changes.append((to, rev))
+ copies[to] = src
+
+ for src in self.changes[rev].ren_dirs:
+ to = self.changes[rev].ren_dirs[src]
+ chgs, cps = self._rendirchanges(src, to)
+ changes += [(f, rev) for f in chgs]
+ copies.update(cps)
+
+ self.lastrev = rev
+ return sorted(set(changes)), copies
+
+ def getcommit(self, rev):
+ changes = self.changes[rev]
+ return commit(author=changes.author, date=changes.date,
+ desc=changes.summary, parents=self.parents[rev], rev=rev)
+
+ def gettags(self):
+ return self.tags
+
+ def _execute(self, cmd, *args, **kwargs):
+ cmdline = [self.execmd, cmd]
+ cmdline += args
+ cmdline = [util.shellquote(arg) for arg in cmdline]
+ cmdline += ['>', util.nulldev, '2>', util.nulldev]
+ cmdline = util.quotecommand(' '.join(cmdline))
+ self.ui.debug(cmdline, '\n')
+ return os.system(cmdline)
+
+ def _update(self, rev):
+ self.ui.debug('applying revision %s...\n' % rev)
+ changeset, status = self.runlines('replay', '-d', self.tmppath,
+ rev)
+ if status:
+ # Something went wrong while merging (baz or tla
+ # issue?), get latest revision and try from there
+ shutil.rmtree(self.tmppath, ignore_errors=True)
+ self._obtainrevision(rev)
+ else:
+ old_rev = self.parents[rev][0]
+ self.ui.debug('computing changeset between %s and %s...\n'
+ % (old_rev, rev))
+ self._parsechangeset(changeset, rev)
+
+ def _getfile(self, name, rev):
+ mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
+ if stat.S_ISLNK(mode):
+ data = os.readlink(os.path.join(self.tmppath, name))
+ mode = mode and 'l' or ''
+ else:
+ data = open(os.path.join(self.tmppath, name), 'rb').read()
+ mode = (mode & 0111) and 'x' or ''
+ return data, mode
+
+ def _exclude(self, name):
+ exclude = ['{arch}', '.arch-ids', '.arch-inventory']
+ for exc in exclude:
+ if name.find(exc) != -1:
+ return True
+ return False
+
+ def _readcontents(self, path):
+ files = []
+ contents = os.listdir(path)
+ while len(contents) > 0:
+ c = contents.pop()
+ p = os.path.join(path, c)
+ # os.walk could be used, but here we avoid internal GNU
+            # Arch files and directories, thus saving a lot of time.
+ if not self._exclude(p):
+ if os.path.isdir(p):
+ contents += [os.path.join(c, f) for f in os.listdir(p)]
+ else:
+ files.append(c)
+ return files
+
+ def _rendirchanges(self, src, dest):
+ changes = []
+ copies = {}
+ files = self._readcontents(os.path.join(self.tmppath, dest))
+ for f in files:
+ s = os.path.join(src, f)
+ d = os.path.join(dest, f)
+ changes.append(s)
+ changes.append(d)
+ copies[d] = s
+ return changes, copies
+
+ def _obtainrevision(self, rev):
+ self.ui.debug('obtaining revision %s...\n' % rev)
+ output = self._execute('get', rev, self.tmppath)
+ self.checkexit(output)
+ self.ui.debug('analyzing revision %s...\n' % rev)
+ files = self._readcontents(self.tmppath)
+ self.changes[rev].add_files += files
+
+ def _stripbasepath(self, path):
+ if path.startswith('./'):
+ return path[2:]
+ return path
+
+ def _parsecatlog(self, data, rev):
+ try:
+ catlog = self.catlogparser.parsestr(data)
+
+ # Commit date
+ self.changes[rev].date = util.datestr(
+ util.strdate(catlog['Standard-date'],
+ '%Y-%m-%d %H:%M:%S'))
+
+ # Commit author
+ self.changes[rev].author = self.recode(catlog['Creator'])
+
+ # Commit description
+ self.changes[rev].summary = '\n\n'.join((catlog['Summary'],
+ catlog.get_payload()))
+ self.changes[rev].summary = self.recode(self.changes[rev].summary)
+
+ # Commit revision origin when dealing with a branch or tag
+ if 'Continuation-of' in catlog:
+ self.changes[rev].continuationof = self.recode(
+ catlog['Continuation-of'])
+ except Exception:
+ raise util.Abort(_('could not parse cat-log of %s') % rev)
+
+ def _parsechangeset(self, data, rev):
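+        # Each line of the changeset listing starts with a status code:
+        # A (added), D (deleted), M (modified), Mb (binary modified),
+        # M-> (modified link), ch (file/link type change), => (renamed
+        # file), /> (renamed directory).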
+ for l in data:
+ l = l.strip()
+ # Added file (ignore added directory)
+ if l.startswith('A') and not l.startswith('A/'):
+ file = self._stripbasepath(l[1:].strip())
+ if not self._exclude(file):
+ self.changes[rev].add_files.append(file)
+ # Deleted file (ignore deleted directory)
+ elif l.startswith('D') and not l.startswith('D/'):
+ file = self._stripbasepath(l[1:].strip())
+ if not self._exclude(file):
+ self.changes[rev].del_files.append(file)
+ # Modified binary file
+ elif l.startswith('Mb'):
+ file = self._stripbasepath(l[2:].strip())
+ if not self._exclude(file):
+ self.changes[rev].mod_files.append(file)
+ # Modified link
+ elif l.startswith('M->'):
+ file = self._stripbasepath(l[3:].strip())
+ if not self._exclude(file):
+ self.changes[rev].mod_files.append(file)
+ # Modified file
+ elif l.startswith('M'):
+ file = self._stripbasepath(l[1:].strip())
+ if not self._exclude(file):
+ self.changes[rev].mod_files.append(file)
+ # Renamed file (or link)
+ elif l.startswith('=>'):
+ files = l[2:].strip().split(' ')
+ if len(files) == 1:
+ files = l[2:].strip().split('\t')
+ src = self._stripbasepath(files[0])
+ dst = self._stripbasepath(files[1])
+ if not self._exclude(src) and not self._exclude(dst):
+ self.changes[rev].ren_files[src] = dst
+ # Conversion from file to link or from link to file (modified)
+ elif l.startswith('ch'):
+ file = self._stripbasepath(l[2:].strip())
+ if not self._exclude(file):
+ self.changes[rev].mod_files.append(file)
+ # Renamed directory
+ elif l.startswith('/>'):
+ dirs = l[2:].strip().split(' ')
+ if len(dirs) == 1:
+ dirs = l[2:].strip().split('\t')
+ src = self._stripbasepath(dirs[0])
+ dst = self._stripbasepath(dirs[1])
+ if not self._exclude(src) and not self._exclude(dst):
+ self.changes[rev].ren_dirs[src] = dst
diff --git a/hgext/convert/hg.py b/hgext/convert/hg.py
new file mode 100644
index 0000000..287c771
--- /dev/null
+++ b/hgext/convert/hg.py
@@ -0,0 +1,395 @@
+# hg.py - hg backend for convert extension
+#
+# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# Notes for hg->hg conversion:
+#
+# * Old versions of Mercurial didn't trim the whitespace from the ends
+# of commit messages, but new versions do. Changesets created by
+# those older versions, then converted, may thus have different
+# hashes for changesets that are otherwise identical.
+#
+# * Using "--config convert.hg.saverev=true" stores the source
+#   identifier in the converted revision. This causes the converted
+#   revision to have a different identity than the source.
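+#
+#   For example (an illustrative invocation; the repository paths are
+#   hypothetical):
+#
+#     hg convert --config convert.hg.saverev=true src-repo dst-repo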
+
+
+import os, time, cStringIO
+from mercurial.i18n import _
+from mercurial.node import bin, hex, nullid
+from mercurial import hg, util, context, bookmarks, error
+
+from common import NoRepo, commit, converter_source, converter_sink
+
+class mercurial_sink(converter_sink):
+ def __init__(self, ui, path):
+ converter_sink.__init__(self, ui, path)
+ self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True)
+ self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False)
+ self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default')
+ self.lastbranch = None
+ if os.path.isdir(path) and len(os.listdir(path)) > 0:
+ try:
+ self.repo = hg.repository(self.ui, path)
+ if not self.repo.local():
+ raise NoRepo(_('%s is not a local Mercurial repository')
+ % path)
+ except error.RepoError, err:
+ ui.traceback()
+ raise NoRepo(err.args[0])
+ else:
+ try:
+ ui.status(_('initializing destination %s repository\n') % path)
+ self.repo = hg.repository(self.ui, path, create=True)
+ if not self.repo.local():
+ raise NoRepo(_('%s is not a local Mercurial repository')
+ % path)
+ self.created.append(path)
+ except error.RepoError:
+ ui.traceback()
+ raise NoRepo(_("could not create hg repository %s as sink")
+ % path)
+ self.lock = None
+ self.wlock = None
+ self.filemapmode = False
+
+ def before(self):
+ self.ui.debug('run hg sink pre-conversion action\n')
+ self.wlock = self.repo.wlock()
+ self.lock = self.repo.lock()
+
+ def after(self):
+ self.ui.debug('run hg sink post-conversion action\n')
+ if self.lock:
+ self.lock.release()
+ if self.wlock:
+ self.wlock.release()
+
+ def revmapfile(self):
+ return self.repo.join("shamap")
+
+ def authorfile(self):
+ return self.repo.join("authormap")
+
+ def getheads(self):
+ h = self.repo.changelog.heads()
+ return [hex(x) for x in h]
+
+ def setbranch(self, branch, pbranches):
+ if not self.clonebranches:
+ return
+
+ setbranch = (branch != self.lastbranch)
+ self.lastbranch = branch
+ if not branch:
+ branch = 'default'
+ pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
+ pbranch = pbranches and pbranches[0][1] or 'default'
+
+ branchpath = os.path.join(self.path, branch)
+ if setbranch:
+ self.after()
+ try:
+ self.repo = hg.repository(self.ui, branchpath)
+ except Exception:
+ self.repo = hg.repository(self.ui, branchpath, create=True)
+ self.before()
+
+        # pbranches may bring revisions from other branches (merge parents).
+ # Make sure we have them, or pull them.
+ missings = {}
+ for b in pbranches:
+ try:
+ self.repo.lookup(b[0])
+ except Exception:
+ missings.setdefault(b[1], []).append(b[0])
+
+ if missings:
+ self.after()
+ for pbranch, heads in missings.iteritems():
+ pbranchpath = os.path.join(self.path, pbranch)
+ prepo = hg.peer(self.ui, {}, pbranchpath)
+ self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
+ self.repo.pull(prepo, [prepo.lookup(h) for h in heads])
+ self.before()
+
+ def _rewritetags(self, source, revmap, data):
+ fp = cStringIO.StringIO()
+ for line in data.splitlines():
+ s = line.split(' ', 1)
+ if len(s) != 2:
+ continue
+ revid = revmap.get(source.lookuprev(s[0]))
+ if not revid:
+ continue
+ fp.write('%s %s\n' % (revid, s[1]))
+ return fp.getvalue()
+
+ def putcommit(self, files, copies, parents, commit, source, revmap):
+
+ files = dict(files)
+ def getfilectx(repo, memctx, f):
+ v = files[f]
+ data, mode = source.getfile(f, v)
+ if f == '.hgtags':
+ data = self._rewritetags(source, revmap, data)
+ return context.memfilectx(f, data, 'l' in mode, 'x' in mode,
+ copies.get(f))
+
+ pl = []
+ for p in parents:
+ if p not in pl:
+ pl.append(p)
+ parents = pl
+ nparents = len(parents)
+ if self.filemapmode and nparents == 1:
+ m1node = self.repo.changelog.read(bin(parents[0]))[0]
+ parent = parents[0]
+
+ if len(parents) < 2:
+ parents.append(nullid)
+ if len(parents) < 2:
+ parents.append(nullid)
+ p2 = parents.pop(0)
+
+ text = commit.desc
+ extra = commit.extra.copy()
+ if self.branchnames and commit.branch:
+ extra['branch'] = commit.branch
+ if commit.rev:
+ extra['convert_revision'] = commit.rev
+
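+        # Mercurial changesets have at most two parents, so an octopus
+        # merge is flattened into a chain of two-parent commits; every
+        # extra parent costs one "(octopus merge fixup)" changeset.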
+ while parents:
+ p1 = p2
+ p2 = parents.pop(0)
+ ctx = context.memctx(self.repo, (p1, p2), text, files.keys(),
+ getfilectx, commit.author, commit.date, extra)
+ self.repo.commitctx(ctx)
+ text = "(octopus merge fixup)\n"
+ p2 = hex(self.repo.changelog.tip())
+
+ if self.filemapmode and nparents == 1:
+ man = self.repo.manifest
+ mnode = self.repo.changelog.read(bin(p2))[0]
+ closed = 'close' in commit.extra
+ if not closed and not man.cmp(m1node, man.revision(mnode)):
+ self.ui.status(_("filtering out empty revision\n"))
+ self.repo.rollback(force=True)
+ return parent
+ return p2
+
+ def puttags(self, tags):
+ try:
+ parentctx = self.repo[self.tagsbranch]
+ tagparent = parentctx.node()
+ except error.RepoError:
+ parentctx = None
+ tagparent = nullid
+
+ try:
+ oldlines = sorted(parentctx['.hgtags'].data().splitlines(True))
+ except Exception:
+ oldlines = []
+
+ newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
+ if newlines == oldlines:
+ return None, None
+ data = "".join(newlines)
+ def getfilectx(repo, memctx, f):
+ return context.memfilectx(f, data, False, False, None)
+
+ self.ui.status(_("updating tags\n"))
+ date = "%s 0" % int(time.mktime(time.gmtime()))
+ extra = {'branch': self.tagsbranch}
+ ctx = context.memctx(self.repo, (tagparent, None), "update tags",
+ [".hgtags"], getfilectx, "convert-repo", date,
+ extra)
+ self.repo.commitctx(ctx)
+ return hex(self.repo.changelog.tip()), hex(tagparent)
+
+ def setfilemapmode(self, active):
+ self.filemapmode = active
+
+ def putbookmarks(self, updatedbookmark):
+ if not len(updatedbookmark):
+ return
+
+ self.ui.status(_("updating bookmarks\n"))
+ for bookmark in updatedbookmark:
+ self.repo._bookmarks[bookmark] = bin(updatedbookmark[bookmark])
+ bookmarks.write(self.repo)
+
+ def hascommit(self, rev):
+ if rev not in self.repo and self.clonebranches:
+ raise util.Abort(_('revision %s not found in destination '
+ 'repository (lookups with clonebranches=true '
+ 'are not implemented)') % rev)
+ return rev in self.repo
+
+class mercurial_source(converter_source):
+ def __init__(self, ui, path, rev=None):
+ converter_source.__init__(self, ui, path, rev)
+ self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False)
+ self.ignored = set()
+ self.saverev = ui.configbool('convert', 'hg.saverev', False)
+ try:
+ self.repo = hg.repository(self.ui, path)
+ # try to provoke an exception if this isn't really a hg
+ # repo, but some other bogus compatible-looking url
+ if not self.repo.local():
+ raise error.RepoError
+ except error.RepoError:
+ ui.traceback()
+ raise NoRepo(_("%s is not a local Mercurial repository") % path)
+ self.lastrev = None
+ self.lastctx = None
+ self._changescache = None
+ self.convertfp = None
+ # Restrict converted revisions to startrev descendants
+ startnode = ui.config('convert', 'hg.startrev')
+ if startnode is not None:
+ try:
+ startnode = self.repo.lookup(startnode)
+ except error.RepoError:
+ raise util.Abort(_('%s is not a valid start revision')
+ % startnode)
+ startrev = self.repo.changelog.rev(startnode)
+ children = {startnode: 1}
+ for rev in self.repo.changelog.descendants([startrev]):
+ children[self.repo.changelog.node(rev)] = 1
+ self.keep = children.__contains__
+ else:
+ self.keep = util.always
+
+ def changectx(self, rev):
+ if self.lastrev != rev:
+ self.lastctx = self.repo[rev]
+ self.lastrev = rev
+ return self.lastctx
+
+ def parents(self, ctx):
+ return [p for p in ctx.parents() if p and self.keep(p.node())]
+
+ def getheads(self):
+ if self.rev:
+ heads = [self.repo[self.rev].node()]
+ else:
+ heads = self.repo.heads()
+ return [hex(h) for h in heads if self.keep(h)]
+
+ def getfile(self, name, rev):
+ try:
+ fctx = self.changectx(rev)[name]
+ return fctx.data(), fctx.flags()
+ except error.LookupError, err:
+ raise IOError(err)
+
+ def getchanges(self, rev):
+ ctx = self.changectx(rev)
+ parents = self.parents(ctx)
+ if not parents:
+ files = sorted(ctx.manifest())
+ # getcopies() is not needed for roots, but it is a simple way to
+ # detect missing revlogs and abort on errors or populate
+ # self.ignored
+ self.getcopies(ctx, parents, files)
+ return [(f, rev) for f in files if f not in self.ignored], {}
+ if self._changescache and self._changescache[0] == rev:
+ m, a, r = self._changescache[1]
+ else:
+ m, a, r = self.repo.status(parents[0].node(), ctx.node())[:3]
+ # getcopies() detects missing revlogs early, run it before
+ # filtering the changes.
+ copies = self.getcopies(ctx, parents, m + a)
+ changes = [(name, rev) for name in m + a + r
+ if name not in self.ignored]
+ return sorted(changes), copies
+
+ def getcopies(self, ctx, parents, files):
+ copies = {}
+ for name in files:
+ if name in self.ignored:
+ continue
+ try:
+ copysource, copynode = ctx.filectx(name).renamed()
+ if copysource in self.ignored or not self.keep(copynode):
+ continue
+ # Ignore copy sources not in parent revisions
+ found = False
+ for p in parents:
+ if copysource in p:
+ found = True
+ break
+ if not found:
+ continue
+ copies[name] = copysource
+ except TypeError:
+ pass
+ except error.LookupError, e:
+ if not self.ignoreerrors:
+ raise
+ self.ignored.add(name)
+ self.ui.warn(_('ignoring: %s\n') % e)
+ return copies
+
+ def getcommit(self, rev):
+ ctx = self.changectx(rev)
+ parents = [p.hex() for p in self.parents(ctx)]
+ if self.saverev:
+ crev = rev
+ else:
+ crev = None
+ return commit(author=ctx.user(),
+ date=util.datestr(ctx.date(), '%Y-%m-%d %H:%M:%S %1%2'),
+ desc=ctx.description(), rev=crev, parents=parents,
+ branch=ctx.branch(), extra=ctx.extra(),
+ sortkey=ctx.rev())
+
+ def gettags(self):
+ tags = [t for t in self.repo.tagslist() if t[0] != 'tip']
+ return dict([(name, hex(node)) for name, node in tags
+ if self.keep(node)])
+
+ def getchangedfiles(self, rev, i):
+ ctx = self.changectx(rev)
+ parents = self.parents(ctx)
+ if not parents and i is None:
+ i = 0
+ changes = [], ctx.manifest().keys(), []
+ else:
+ i = i or 0
+ changes = self.repo.status(parents[i].node(), ctx.node())[:3]
+ changes = [[f for f in l if f not in self.ignored] for l in changes]
+
+ if i == 0:
+ self._changescache = (rev, changes)
+
+ return changes[0] + changes[1] + changes[2]
+
+ def converted(self, rev, destrev):
+ if self.convertfp is None:
+ self.convertfp = open(self.repo.join('shamap'), 'a')
+ self.convertfp.write('%s %s\n' % (destrev, rev))
+ self.convertfp.flush()
+
+ def before(self):
+ self.ui.debug('run hg source pre-conversion action\n')
+
+ def after(self):
+ self.ui.debug('run hg source post-conversion action\n')
+
+ def hasnativeorder(self):
+ return True
+
+ def lookuprev(self, rev):
+ try:
+ return hex(self.repo.lookup(rev))
+ except error.RepoError:
+ return None
+
+ def getbookmarks(self):
+ return bookmarks.listbookmarks(self.repo)
diff --git a/hgext/convert/monotone.py b/hgext/convert/monotone.py
new file mode 100644
index 0000000..969e0e5
--- /dev/null
+++ b/hgext/convert/monotone.py
@@ -0,0 +1,360 @@
+# monotone.py - monotone support for the convert extension
+#
+# Copyright 2008, 2009 Mikkel Fahnoe Jorgensen <mikkel@dvide.com> and
+# others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os, re
+from mercurial import util
+from common import NoRepo, commit, converter_source, checktool
+from common import commandline
+from mercurial.i18n import _
+
+class monotone_source(converter_source, commandline):
+ def __init__(self, ui, path=None, rev=None):
+ converter_source.__init__(self, ui, path, rev)
+ commandline.__init__(self, ui, 'mtn')
+
+ self.ui = ui
+ self.path = path
+ self.automatestdio = False
+ self.rev = rev
+
+ norepo = NoRepo(_("%s does not look like a monotone repository")
+ % path)
+ if not os.path.exists(os.path.join(path, '_MTN')):
+ # Could be a monotone repository (SQLite db file)
+ try:
+ f = file(path, 'rb')
+ header = f.read(16)
+ f.close()
+ except IOError:
+ header = ''
+ if header != 'SQLite format 3\x00':
+ raise norepo
+
+ # regular expressions for parsing monotone output
+ space = r'\s*'
+ name = r'\s+"((?:\\"|[^"])*)"\s*'
+ value = name
+ revision = r'\s+\[(\w+)\]\s*'
+ lines = r'(?:.|\n)+'
+
+ self.dir_re = re.compile(space + "dir" + name)
+ self.file_re = re.compile(space + "file" + name +
+ "content" + revision)
+ self.add_file_re = re.compile(space + "add_file" + name +
+ "content" + revision)
+ self.patch_re = re.compile(space + "patch" + name +
+ "from" + revision + "to" + revision)
+ self.rename_re = re.compile(space + "rename" + name + "to" + name)
+ self.delete_re = re.compile(space + "delete" + name)
+ self.tag_re = re.compile(space + "tag" + name + "revision" +
+ revision)
+ self.cert_re = re.compile(lines + space + "name" + name +
+ "value" + value)
+
+ attr = space + "file" + lines + space + "attr" + space
+ self.attr_execute_re = re.compile(attr + '"mtn:execute"' +
+ space + '"true"')
+
+ # cached data
+ self.manifest_rev = None
+ self.manifest = None
+ self.files = None
+ self.dirs = None
+
+ checktool('mtn', abort=False)
+
+ def mtnrun(self, *args, **kwargs):
+ if self.automatestdio:
+ return self.mtnrunstdio(*args, **kwargs)
+ else:
+ return self.mtnrunsingle(*args, **kwargs)
+
+ def mtnrunsingle(self, *args, **kwargs):
+ kwargs['d'] = self.path
+ return self.run0('automate', *args, **kwargs)
+
+ def mtnrunstdio(self, *args, **kwargs):
+ # Prepare the command in automate stdio format
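+        # (e.g. "automate leaves" becomes "l6:leavese"; option blocks,
+        # when present, are wrapped in a leading "o ... e")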
+ command = []
+ for k, v in kwargs.iteritems():
+ command.append("%s:%s" % (len(k), k))
+ if v:
+ command.append("%s:%s" % (len(v), v))
+ if command:
+ command.insert(0, 'o')
+ command.append('e')
+
+ command.append('l')
+ for arg in args:
+            command.append("%s:%s" % (len(arg), arg))
+ command.append('e')
+ command = ''.join(command)
+
+ self.ui.debug("mtn: sending '%s'\n" % command)
+ self.mtnwritefp.write(command)
+ self.mtnwritefp.flush()
+
+ return self.mtnstdioreadcommandoutput(command)
+
+ def mtnstdioreadpacket(self):
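+        # A packet has the form
+        #   <command number>:<stream>:<payload size>:<payload>
+        # where <stream> is one of m (main), e (error), w (warning),
+        # p (progress), t (ticker) or l (last).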
+ read = None
+ commandnbr = ''
+ while read != ':':
+ read = self.mtnreadfp.read(1)
+ if not read:
+ raise util.Abort(_('bad mtn packet - no end of commandnbr'))
+ commandnbr += read
+ commandnbr = commandnbr[:-1]
+
+ stream = self.mtnreadfp.read(1)
+ if stream not in 'mewptl':
+ raise util.Abort(_('bad mtn packet - bad stream type %s') % stream)
+
+ read = self.mtnreadfp.read(1)
+ if read != ':':
+ raise util.Abort(_('bad mtn packet - no divider before size'))
+
+ read = None
+ lengthstr = ''
+ while read != ':':
+ read = self.mtnreadfp.read(1)
+ if not read:
+ raise util.Abort(_('bad mtn packet - no end of packet size'))
+ lengthstr += read
+ try:
+ length = long(lengthstr[:-1])
+        except ValueError:
+ raise util.Abort(_('bad mtn packet - bad packet size %s')
+ % lengthstr)
+
+ read = self.mtnreadfp.read(length)
+ if len(read) != length:
+ raise util.Abort(_("bad mtn packet - unable to read full packet "
+ "read %s of %s") % (len(read), length))
+
+ return (commandnbr, stream, length, read)
+
+ def mtnstdioreadcommandoutput(self, command):
+ retval = []
+ while True:
+ commandnbr, stream, length, output = self.mtnstdioreadpacket()
+ self.ui.debug('mtn: read packet %s:%s:%s\n' %
+ (commandnbr, stream, length))
+
+ if stream == 'l':
+ # End of command
+ if output != '0':
+ raise util.Abort(_("mtn command '%s' returned %s") %
+ (command, output))
+ break
+ elif stream in 'ew':
+ # Error, warning output
+ self.ui.warn(_('%s error:\n') % self.command)
+ self.ui.warn(output)
+ elif stream == 'p':
+ # Progress messages
+ self.ui.debug('mtn: ' + output)
+ elif stream == 'm':
+ # Main stream - command output
+ retval.append(output)
+
+ return ''.join(retval)
+
+ def mtnloadmanifest(self, rev):
+ if self.manifest_rev == rev:
+ return
+ self.manifest = self.mtnrun("get_manifest_of", rev).split("\n\n")
+ self.manifest_rev = rev
+ self.files = {}
+ self.dirs = {}
+
+ for e in self.manifest:
+ m = self.file_re.match(e)
+ if m:
+ attr = ""
+ name = m.group(1)
+ node = m.group(2)
+ if self.attr_execute_re.match(e):
+ attr += "x"
+ self.files[name] = (node, attr)
+ m = self.dir_re.match(e)
+ if m:
+ self.dirs[m.group(1)] = True
+
+ def mtnisfile(self, name, rev):
+ # a non-file could be a directory or a deleted or renamed file
+ self.mtnloadmanifest(rev)
+ return name in self.files
+
+ def mtnisdir(self, name, rev):
+ self.mtnloadmanifest(rev)
+ return name in self.dirs
+
+ def mtngetcerts(self, rev):
+ certs = {"author":"<missing>", "date":"<missing>",
+ "changelog":"<missing>", "branch":"<missing>"}
+ certlist = self.mtnrun("certs", rev)
+ # mtn < 0.45:
+ # key "test@selenic.com"
+ # mtn >= 0.45:
+ # key [ff58a7ffb771907c4ff68995eada1c4da068d328]
+ certlist = re.split('\n\n key ["\[]', certlist)
+ for e in certlist:
+ m = self.cert_re.match(e)
+ if m:
+ name, value = m.groups()
+ value = value.replace(r'\"', '"')
+ value = value.replace(r'\\', '\\')
+ certs[name] = value
+ # Monotone may have subsecond dates: 2005-02-05T09:39:12.364306
+ # and all times are stored in UTC
+ certs["date"] = certs["date"].split('.')[0] + " UTC"
+ return certs
+
+ # implement the converter_source interface:
+
+ def getheads(self):
+ if not self.rev:
+ return self.mtnrun("leaves").splitlines()
+ else:
+ return [self.rev]
+
+ def getchanges(self, rev):
+ revision = self.mtnrun("get_revision", rev).split("\n\n")
+ files = {}
+ ignoremove = {}
+ renameddirs = []
+ copies = {}
+ for e in revision:
+ m = self.add_file_re.match(e)
+ if m:
+ files[m.group(1)] = rev
+ ignoremove[m.group(1)] = rev
+ m = self.patch_re.match(e)
+ if m:
+ files[m.group(1)] = rev
+ # Delete/rename is handled later when the convert engine
+ # discovers an IOError exception from getfile,
+ # but only if we add the "from" file to the list of changes.
+ m = self.delete_re.match(e)
+ if m:
+ files[m.group(1)] = rev
+ m = self.rename_re.match(e)
+ if m:
+ toname = m.group(2)
+ fromname = m.group(1)
+ if self.mtnisfile(toname, rev):
+ ignoremove[toname] = 1
+ copies[toname] = fromname
+ files[toname] = rev
+ files[fromname] = rev
+ elif self.mtnisdir(toname, rev):
+ renameddirs.append((fromname, toname))
+
+ # Directory renames can be handled only once we have recorded
+ # all new files
+ for fromdir, todir in renameddirs:
+ renamed = {}
+ for tofile in self.files:
+ if tofile in ignoremove:
+ continue
+ if tofile.startswith(todir + '/'):
+ renamed[tofile] = fromdir + tofile[len(todir):]
+ # Avoid chained moves like:
+ # d1(/a) => d3/d1(/a)
+ # d2 => d3
+ ignoremove[tofile] = 1
+ for tofile, fromfile in renamed.items():
+                    self.ui.debug(_("copying file in renamed directory "
+                                    "from '%s' to '%s'")
+                                  % (fromfile, tofile), '\n')
+ files[tofile] = rev
+ copies[tofile] = fromfile
+ for fromfile in renamed.values():
+ files[fromfile] = rev
+
+ return (files.items(), copies)
+
+ def getfile(self, name, rev):
+ if not self.mtnisfile(name, rev):
+ raise IOError # file was deleted or renamed
+ try:
+ data = self.mtnrun("get_file_of", name, r=rev)
+ except Exception:
+ raise IOError # file was deleted or renamed
+ self.mtnloadmanifest(rev)
+ node, attr = self.files.get(name, (None, ""))
+ return data, attr
+
+ def getcommit(self, rev):
+ extra = {}
+ certs = self.mtngetcerts(rev)
+ if certs.get('suspend') == certs["branch"]:
+ extra['close'] = '1'
+ return commit(
+ author=certs["author"],
+ date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")),
+ desc=certs["changelog"],
+ rev=rev,
+ parents=self.mtnrun("parents", rev).splitlines(),
+ branch=certs["branch"],
+ extra=extra)
+
+ def gettags(self):
+ tags = {}
+ for e in self.mtnrun("tags").split("\n\n"):
+ m = self.tag_re.match(e)
+ if m:
+ tags[m.group(1)] = m.group(2)
+ return tags
+
+ def getchangedfiles(self, rev, i):
+ # This function is only needed to support --filemap
+ # ... and we don't support that
+ raise NotImplementedError
+
+ def before(self):
+ # Check if we have a new enough version to use automate stdio
+ version = 0.0
+ try:
+ versionstr = self.mtnrunsingle("interface_version")
+ version = float(versionstr)
+ except Exception:
+ raise util.Abort(_("unable to determine mtn automate interface "
+ "version"))
+
+ if version >= 12.0:
+ self.automatestdio = True
+ self.ui.debug("mtn automate version %s - using automate stdio\n" %
+ version)
+
+ # launch the long-running automate stdio process
+ self.mtnwritefp, self.mtnreadfp = self._run2('automate', 'stdio',
+ '-d', self.path)
+ # read the headers
+ read = self.mtnreadfp.readline()
+ if read != 'format-version: 2\n':
+ raise util.Abort(_('mtn automate stdio header unexpected: %s')
+ % read)
+ while read != '\n':
+ read = self.mtnreadfp.readline()
+ if not read:
+ raise util.Abort(_("failed to reach end of mtn automate "
+ "stdio headers"))
+ else:
+ self.ui.debug("mtn automate version %s - not using automate stdio "
+ "(automate >= 12.0 - mtn >= 0.46 is needed)\n" % version)
+
+ def after(self):
+ if self.automatestdio:
+ self.mtnwritefp.close()
+ self.mtnwritefp = None
+ self.mtnreadfp.close()
+ self.mtnreadfp = None
+
diff --git a/hgext/convert/p4.py b/hgext/convert/p4.py
new file mode 100644
index 0000000..76b28e7
--- /dev/null
+++ b/hgext/convert/p4.py
@@ -0,0 +1,203 @@
+# Perforce source for convert extension.
+#
+# Copyright 2009, Frank Kingswood <frank@kingswood-consulting.co.uk>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from mercurial import util
+from mercurial.i18n import _
+
+from common import commit, converter_source, checktool, NoRepo
+import marshal
+import re
+
+def loaditer(f):
+ "Yield the dictionary objects generated by p4"
+ try:
+ while True:
+ d = marshal.load(f)
+ if not d:
+ break
+ yield d
+ except EOFError:
+ pass
+
+class p4_source(converter_source):
+ def __init__(self, ui, path, rev=None):
+ super(p4_source, self).__init__(ui, path, rev=rev)
+
+ if "/" in path and not path.startswith('//'):
+ raise NoRepo(_('%s does not look like a P4 repository') % path)
+
+ checktool('p4', abort=False)
+
+ self.p4changes = {}
+ self.heads = {}
+ self.changeset = {}
+ self.files = {}
+ self.tags = {}
+ self.lastbranch = {}
+ self.parent = {}
+ self.encoding = "latin_1"
+ self.depotname = {} # mapping from local name to depot name
+        self.re_type = re.compile(
+            r"([a-z]+)?(text|binary|symlink|apple|resource|unicode|utf\d+)"
+            r"(\+\w+)?$")
+        self.re_keywords = re.compile(
+            r"\$(Id|Header|Date|DateTime|Change|File|Revision|Author)"
+            r":[^$\n]*\$")
+        self.re_keywords_old = re.compile(r"\$(Id|Header):[^$\n]*\$")
+
+ self._parse(ui, path)
+
+ def _parse_view(self, path):
+ "Read changes affecting the path"
+ cmd = 'p4 -G changes -s submitted %s' % util.shellquote(path)
+ stdout = util.popen(cmd, mode='rb')
+ for d in loaditer(stdout):
+ c = d.get("change", None)
+ if c:
+ self.p4changes[c] = True
+
+ def _parse(self, ui, path):
+ "Prepare list of P4 filenames and revisions to import"
+ ui.status(_('reading p4 views\n'))
+
+ # read client spec or view
+ if "/" in path:
+ self._parse_view(path)
+ if path.startswith("//") and path.endswith("/..."):
+ views = {path[:-3]:""}
+ else:
+ views = {"//": ""}
+ else:
+ cmd = 'p4 -G client -o %s' % util.shellquote(path)
+ clientspec = marshal.load(util.popen(cmd, mode='rb'))
+
+ views = {}
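+            # View lines map depot paths to client paths, e.g.
+            #   //depot/proj/... //client/proj/...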
+ for client in clientspec:
+ if client.startswith("View"):
+ sview, cview = clientspec[client].split()
+ self._parse_view(sview)
+ if sview.endswith("...") and cview.endswith("..."):
+ sview = sview[:-3]
+ cview = cview[:-3]
+ cview = cview[2:]
+ cview = cview[cview.find("/") + 1:]
+ views[sview] = cview
+
+ # list of changes that affect our source files
+ self.p4changes = self.p4changes.keys()
+ self.p4changes.sort(key=int)
+
+ # list with depot pathnames, longest first
+ vieworder = views.keys()
+ vieworder.sort(key=len, reverse=True)
+
+ # handle revision limiting
+ startrev = self.ui.config('convert', 'p4.startrev', default=0)
+ self.p4changes = [x for x in self.p4changes
+ if ((not startrev or int(x) >= int(startrev)) and
+ (not self.rev or int(x) <= int(self.rev)))]
+
+ # now read the full changelists to get the list of file revisions
+ ui.status(_('collecting p4 changelists\n'))
+ lastid = None
+ for change in self.p4changes:
+ cmd = "p4 -G describe -s %s" % change
+ stdout = util.popen(cmd, mode='rb')
+ d = marshal.load(stdout)
+ desc = self.recode(d["desc"])
+ shortdesc = desc.split("\n", 1)[0]
+ t = '%s %s' % (d["change"], repr(shortdesc)[1:-1])
+ ui.status(util.ellipsis(t, 80) + '\n')
+
+ if lastid:
+ parents = [lastid]
+ else:
+ parents = []
+
+ date = (int(d["time"]), 0) # timezone not set
+ c = commit(author=self.recode(d["user"]),
+ date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
+ parents=parents, desc=desc, branch='',
+ extra={"p4": change})
+
+ files = []
+ i = 0
+ while ("depotFile%d" % i) in d and ("rev%d" % i) in d:
+ oldname = d["depotFile%d" % i]
+ filename = None
+ for v in vieworder:
+ if oldname.startswith(v):
+ filename = views[v] + oldname[len(v):]
+ break
+ if filename:
+ files.append((filename, d["rev%d" % i]))
+ self.depotname[filename] = oldname
+ i += 1
+ self.changeset[change] = c
+ self.files[change] = files
+ lastid = change
+
+ if lastid:
+ self.heads = [lastid]
+
+ def getheads(self):
+ return self.heads
+
+ def getfile(self, name, rev):
+ cmd = 'p4 -G print %s' \
+ % util.shellquote("%s#%s" % (self.depotname[name], rev))
+ stdout = util.popen(cmd, mode='rb')
+
+ mode = None
+ contents = ""
+ keywords = None
+
+ for d in loaditer(stdout):
+ code = d["code"]
+ data = d.get("data")
+
+ if code == "error":
+ raise IOError(d["generic"], data)
+
+ elif code == "stat":
+ p4type = self.re_type.match(d["type"])
+ if p4type:
+ mode = ""
+ flags = (p4type.group(1) or "") + (p4type.group(3) or "")
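+                    # group(1) is the base-type prefix (e.g. the "k" of
+                    # "ktext"), group(3) any "+" modifiers (e.g. "+x")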
+ if "x" in flags:
+ mode = "x"
+ if p4type.group(2) == "symlink":
+ mode = "l"
+ if "ko" in flags:
+ keywords = self.re_keywords_old
+ elif "k" in flags:
+ keywords = self.re_keywords
+
+ elif code == "text" or code == "binary":
+ contents += data
+
+ if mode is None:
+ raise IOError(0, "bad stat")
+
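+        # collapse expanded RCS-style keywords, e.g. "$Id: ... $" back
+        # to "$Id$", so converted file contents are stable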
+ if keywords:
+ contents = keywords.sub("$\\1$", contents)
+ if mode == "l" and contents.endswith("\n"):
+ contents = contents[:-1]
+
+ return contents, mode
+
+ def getchanges(self, rev):
+ return self.files[rev], {}
+
+ def getcommit(self, rev):
+ return self.changeset[rev]
+
+ def gettags(self):
+ return self.tags
+
+ def getchangedfiles(self, rev, i):
+ return sorted([x[0] for x in self.files[rev]])
diff --git a/hgext/convert/subversion.py b/hgext/convert/subversion.py
new file mode 100644
index 0000000..094988b
--- /dev/null
+++ b/hgext/convert/subversion.py
@@ -0,0 +1,1251 @@
+# Subversion 1.4/1.5 Python API backend
+#
+# Copyright(C) 2007 Daniel Holth et al
+
+import os, re, sys, tempfile, urllib, urllib2, xml.dom.minidom
+import cPickle as pickle
+
+from mercurial import strutil, scmutil, util, encoding
+from mercurial.i18n import _
+
+propertycache = util.propertycache
+
+# Subversion stuff. Works best with very recent Python SVN bindings
+# e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
+# these bindings.
+
+from cStringIO import StringIO
+
+from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
+from common import commandline, converter_source, converter_sink, mapfile
+
+try:
+ from svn.core import SubversionException, Pool
+ import svn
+ import svn.client
+ import svn.core
+ import svn.ra
+ import svn.delta
+ import transport
+ import warnings
+ warnings.filterwarnings('ignore',
+ module='svn.core',
+ category=DeprecationWarning)
+
+except ImportError:
+ svn = None
+
+class SvnPathNotFound(Exception):
+ pass
+
+def revsplit(rev):
+ """Parse a revision string and return (uuid, path, revnum)."""
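+    # e.g. 'svn:a1b2c3d4/trunk@42' -> ('a1b2c3d4', '/trunk', 42)
+    # (the uuid here is illustrative)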
+ url, revnum = rev.rsplit('@', 1)
+ parts = url.split('/', 1)
+ mod = ''
+ if len(parts) > 1:
+ mod = '/' + parts[1]
+ return parts[0][4:], mod, int(revnum)
+
+def quote(s):
+ # As of svn 1.7, many svn calls expect "canonical" paths. In
+ # theory, we should call svn.core.*canonicalize() on all paths
+ # before passing them to the API. Instead, we assume the base url
+ # is canonical and copy the behaviour of svn URL encoding function
+ # so we can extend it safely with new components. The "safe"
+ # characters were taken from the "svn_uri__char_validity" table in
+ # libsvn_subr/path.c.
+ return urllib.quote(s, "!$&'()*+,-./:=@_~")
+
+def geturl(path):
+ try:
+ return svn.client.url_from_path(svn.core.svn_path_canonicalize(path))
+ except SubversionException:
+ # svn.client.url_from_path() fails with local repositories
+ pass
+ if os.path.isdir(path):
+ path = os.path.normpath(os.path.abspath(path))
+ if os.name == 'nt':
+ path = '/' + util.normpath(path)
+ # Module URL is later compared with the repository URL returned
+ # by svn API, which is UTF-8.
+ path = encoding.tolocal(path)
+ path = 'file://%s' % quote(path)
+ return svn.core.svn_path_canonicalize(path)
+
+def optrev(number):
+ optrev = svn.core.svn_opt_revision_t()
+ optrev.kind = svn.core.svn_opt_revision_number
+ optrev.value.number = number
+ return optrev
+
+class changedpath(object):
+ def __init__(self, p):
+ self.copyfrom_path = p.copyfrom_path
+ self.copyfrom_rev = p.copyfrom_rev
+ self.action = p.action
+
+def get_log_child(fp, url, paths, start, end, limit=0,
+ discover_changed_paths=True, strict_node_history=False):
+ protocol = -1
+ def receiver(orig_paths, revnum, author, date, message, pool):
+ if orig_paths is not None:
+ for k, v in orig_paths.iteritems():
+ orig_paths[k] = changedpath(v)
+ pickle.dump((orig_paths, revnum, author, date, message),
+ fp, protocol)
+
+ try:
+ # Use an ra of our own so that our parent can consume
+ # our results without confusing the server.
+ t = transport.SvnRaTransport(url=url)
+ svn.ra.get_log(t.ra, paths, start, end, limit,
+ discover_changed_paths,
+ strict_node_history,
+ receiver)
+ except IOError:
+ # Caller may interrupt the iteration
+ pickle.dump(None, fp, protocol)
+ except Exception, inst:
+ pickle.dump(str(inst), fp, protocol)
+ else:
+ pickle.dump(None, fp, protocol)
+ fp.close()
+    # With a large history, the cleanup process goes crazy and suddenly
+    # consumes a *huge* amount of memory. Since the output file is
+    # already closed, there is no need for a clean termination.
+ os._exit(0)
+
+def debugsvnlog(ui, **opts):
+    """Fetch the SVN log in a subprocess and channel it back to the
+    parent to avoid memory collection issues.
+ """
+ if svn is None:
+ raise util.Abort(_('debugsvnlog could not load Subversion python '
+ 'bindings'))
+
+ util.setbinary(sys.stdin)
+ util.setbinary(sys.stdout)
+ args = decodeargs(sys.stdin.read())
+ get_log_child(sys.stdout, *args)
+
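+# Each entry is a pickled (paths, revnum, author, date, message) tuple;
+# a final pickled None marks the normal end of the stream.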
+class logstream(object):
+ """Interruptible revision log iterator."""
+ def __init__(self, stdout):
+ self._stdout = stdout
+
+ def __iter__(self):
+ while True:
+ try:
+ entry = pickle.load(self._stdout)
+ except EOFError:
+ raise util.Abort(_('Mercurial failed to run itself, check'
+                                   ' that the hg executable is in PATH'))
+ try:
+ orig_paths, revnum, author, date, message = entry
+ except (TypeError, ValueError):
+ if entry is None:
+ break
+ raise util.Abort(_("log stream exception '%s'") % entry)
+ yield entry
+
+ def close(self):
+ if self._stdout:
+ self._stdout.close()
+ self._stdout = None
+
+
+# Check to see if the given path is a local Subversion repo. Verify this by
+# looking for several svn-specific files and directories in the given
+# directory.
+def filecheck(ui, path, proto):
+ for x in ('locks', 'hooks', 'format', 'db'):
+ if not os.path.exists(os.path.join(path, x)):
+ return False
+ return True
+
+# Check to see if a given path is the root of an svn repo over http. We verify
+# this by requesting a version-controlled URL we know can't exist and looking
+# for the svn-specific "not found" XML.
+def httpcheck(ui, path, proto):
+ try:
+ opener = urllib2.build_opener()
+ rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path))
+ data = rsp.read()
+ except urllib2.HTTPError, inst:
+ if inst.code != 404:
+ # Except for 404 we cannot know for sure this is not an svn repo
+ ui.warn(_('svn: cannot probe remote repository, assume it could '
+ 'be a subversion repository. Use --source-type if you '
+ 'know better.\n'))
+ return True
+ data = inst.fp.read()
+ except Exception:
+ # Could be urllib2.URLError if the URL is invalid or anything else.
+ return False
+ return '<m:human-readable errcode="160013">' in data
+
+protomap = {'http': httpcheck,
+ 'https': httpcheck,
+ 'file': filecheck,
+ }
+def issvnurl(ui, url):
+ try:
+ proto, path = url.split('://', 1)
+ if proto == 'file':
+ if (os.name == 'nt' and path[:1] == '/' and path[1:2].isalpha()
+ and path[2:6].lower() == '%3a/'):
+ path = path[:2] + ':/' + path[6:]
+ path = urllib.url2pathname(path)
+ except ValueError:
+ proto = 'file'
+ path = os.path.abspath(url)
+ if proto == 'file':
+ path = util.pconvert(path)
+ check = protomap.get(proto, lambda *args: False)
+ while '/' in path:
+ if check(ui, path, proto):
+ return True
+ path = path.rsplit('/', 1)[0]
+ return False
+
+# SVN conversion code stolen from bzr-svn and tailor
+#
+# Subversion looks like a versioned filesystem, branches structures
+# are defined by conventions and not enforced by the tool. First,
+# we define the potential branches (modules) as "trunk" and "branches"
+# children directories. Revisions are then identified by their
+# module and revision number (and a repository identifier).
+#
+# The revision graph is really a tree (or a forest). By default, a
+# revision parent is the previous revision in the same module. If the
+# module directory is copied/moved from another module then the
+# revision is the module root and its parent the source revision in
+# the parent module. A revision has at most one parent.
+#
+class svn_source(converter_source):
+ def __init__(self, ui, url, rev=None):
+ super(svn_source, self).__init__(ui, url, rev=rev)
+
+ if not (url.startswith('svn://') or url.startswith('svn+ssh://') or
+ (os.path.exists(url) and
+ os.path.exists(os.path.join(url, '.svn'))) or
+ issvnurl(ui, url)):
+ raise NoRepo(_("%s does not look like a Subversion repository")
+ % url)
+ if svn is None:
+ raise MissingTool(_('could not load Subversion python bindings'))
+
+ try:
+ version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
+ if version < (1, 4):
+ raise MissingTool(_('Subversion python bindings %d.%d found, '
+ '1.4 or later required') % version)
+ except AttributeError:
+ raise MissingTool(_('Subversion python bindings are too old, 1.4 '
+ 'or later required'))
+
+ self.lastrevs = {}
+
+ latest = None
+ try:
+ # Support file://path@rev syntax. Useful e.g. to convert
+ # deleted branches.
+ at = url.rfind('@')
+ if at >= 0:
+ latest = int(url[at + 1:])
+ url = url[:at]
+ except ValueError:
+ pass
+ self.url = geturl(url)
+ self.encoding = 'UTF-8' # Subversion is always nominal UTF-8
+ try:
+ self.transport = transport.SvnRaTransport(url=self.url)
+ self.ra = self.transport.ra
+ self.ctx = self.transport.client
+ self.baseurl = svn.ra.get_repos_root(self.ra)
+ # Module is either empty or a repository path starting with
+ # a slash and not ending with a slash.
+ self.module = urllib.unquote(self.url[len(self.baseurl):])
+ self.prevmodule = None
+ self.rootmodule = self.module
+ self.commits = {}
+ self.paths = {}
+ self.uuid = svn.ra.get_uuid(self.ra)
+ except SubversionException:
+ ui.traceback()
+ raise NoRepo(_("%s does not look like a Subversion repository")
+ % self.url)
+
+ if rev:
+ try:
+ latest = int(rev)
+ except ValueError:
+ raise util.Abort(_('svn: revision %s is not an integer') % rev)
+
+ self.trunkname = self.ui.config('convert', 'svn.trunk',
+ 'trunk').strip('/')
+ self.startrev = self.ui.config('convert', 'svn.startrev', default=0)
+ try:
+ self.startrev = int(self.startrev)
+ if self.startrev < 0:
+ self.startrev = 0
+ except ValueError:
+ raise util.Abort(_('svn: start revision %s is not an integer')
+ % self.startrev)
+
+ try:
+ self.head = self.latest(self.module, latest)
+ except SvnPathNotFound:
+ self.head = None
+ if not self.head:
+ raise util.Abort(_('no revision found in module %s')
+ % self.module)
+ self.last_changed = self.revnum(self.head)
+
+ self._changescache = None
+
+ if os.path.exists(os.path.join(url, '.svn/entries')):
+ self.wc = url
+ else:
+ self.wc = None
+ self.convertfp = None
+
+ def setrevmap(self, revmap):
+ lastrevs = {}
+ for revid in revmap.iterkeys():
+ uuid, module, revnum = revsplit(revid)
+ lastrevnum = lastrevs.setdefault(module, revnum)
+ if revnum > lastrevnum:
+ lastrevs[module] = revnum
+ self.lastrevs = lastrevs
+
+ def exists(self, path, optrev):
+ try:
+ svn.client.ls(self.url.rstrip('/') + '/' + quote(path),
+ optrev, False, self.ctx)
+ return True
+ except SubversionException:
+ return False
+
+ def getheads(self):
+
+ def isdir(path, revnum):
+ kind = self._checkpath(path, revnum)
+ return kind == svn.core.svn_node_dir
+
+ def getcfgpath(name, rev):
+ cfgpath = self.ui.config('convert', 'svn.' + name)
+ if cfgpath is not None and cfgpath.strip() == '':
+ return None
+ path = (cfgpath or name).strip('/')
+ if not self.exists(path, rev):
+ if self.module.endswith(path) and name == 'trunk':
+ # we are converting from inside this directory
+ return None
+ if cfgpath:
+ raise util.Abort(_('expected %s to be at %r, but not found')
+ % (name, path))
+ return None
+ self.ui.note(_('found %s at %r\n') % (name, path))
+ return path
+
+ rev = optrev(self.last_changed)
+ oldmodule = ''
+ trunk = getcfgpath('trunk', rev)
+ self.tags = getcfgpath('tags', rev)
+ branches = getcfgpath('branches', rev)
+
+ # If the project has a trunk or branches, we will extract heads
+ # from them. We keep the project root otherwise.
+ if trunk:
+ oldmodule = self.module or ''
+ self.module += '/' + trunk
+ self.head = self.latest(self.module, self.last_changed)
+ if not self.head:
+ raise util.Abort(_('no revision found in module %s')
+ % self.module)
+
+ # First head in the list is the module's head
+ self.heads = [self.head]
+ if self.tags is not None:
+            self.tags = '%s/%s' % (oldmodule, (self.tags or 'tags'))
+
+ # Check if branches bring a few more heads to the list
+ if branches:
+ rpath = self.url.strip('/')
+ branchnames = svn.client.ls(rpath + '/' + quote(branches),
+ rev, False, self.ctx)
+ for branch in branchnames.keys():
+ module = '%s/%s/%s' % (oldmodule, branches, branch)
+ if not isdir(module, self.last_changed):
+ continue
+ brevid = self.latest(module, self.last_changed)
+ if not brevid:
+ self.ui.note(_('ignoring empty branch %s\n') % branch)
+ continue
+ self.ui.note(_('found branch %s at %d\n') %
+ (branch, self.revnum(brevid)))
+ self.heads.append(brevid)
+
+ if self.startrev and self.heads:
+ if len(self.heads) > 1:
+ raise util.Abort(_('svn: start revision is not supported '
+ 'with more than one branch'))
+ revnum = self.revnum(self.heads[0])
+ if revnum < self.startrev:
+ raise util.Abort(
+ _('svn: no revision found after start revision %d')
+ % self.startrev)
+
+ return self.heads
+
+ def getchanges(self, rev):
+ if self._changescache and self._changescache[0] == rev:
+ return self._changescache[1]
+ self._changescache = None
+ (paths, parents) = self.paths[rev]
+ if parents:
+ files, self.removed, copies = self.expandpaths(rev, paths, parents)
+ else:
+ # Perform a full checkout on roots
+ uuid, module, revnum = revsplit(rev)
+ entries = svn.client.ls(self.baseurl + quote(module),
+ optrev(revnum), True, self.ctx)
+ files = [n for n, e in entries.iteritems()
+ if e.kind == svn.core.svn_node_file]
+ copies = {}
+ self.removed = set()
+
+ files.sort()
+ files = zip(files, [rev] * len(files))
+
+ # caller caches the result, so free it here to release memory
+ del self.paths[rev]
+ return (files, copies)
+
+ def getchangedfiles(self, rev, i):
+ changes = self.getchanges(rev)
+ self._changescache = (rev, changes)
+ return [f[0] for f in changes[0]]
+
+ def getcommit(self, rev):
+ if rev not in self.commits:
+ uuid, module, revnum = revsplit(rev)
+ self.module = module
+ self.reparent(module)
+ # We assume that:
+ # - requests for revisions after "stop" come from the
+ # revision graph backward traversal. Cache all of them
+ # down to stop, they will be used eventually.
+ # - requests for revisions before "stop" come to get
+ # isolated branches parents. Just fetch what is needed.
+ stop = self.lastrevs.get(module, 0)
+ if revnum < stop:
+ stop = revnum + 1
+ self._fetch_revisions(revnum, stop)
+ if rev not in self.commits:
+ raise util.Abort(_('svn: revision %s not found') % revnum)
+ commit = self.commits[rev]
+ # caller caches the result, so free it here to release memory
+ del self.commits[rev]
+ return commit
+
+ def gettags(self):
+ tags = {}
+ if self.tags is None:
+ return tags
+
+ # svn tags are just a convention, project branches left in a
+ # 'tags' directory. There is no other relationship than
+ # ancestry, which is expensive to discover and makes them hard
+ # to update incrementally. Worse, past revisions may be
+ # referenced by tags far away in the future, requiring a deep
+ # history traversal on every calculation. Current code
+ # performs a single backward traversal, tracking moves within
+ # the tags directory (tag renaming) and recording a new tag
+        # every time a project is copied from outside the tags
+        # directory. It also lists deleted tags; this behaviour may
+        # change in the future.
+ pendings = []
+ tagspath = self.tags
+ start = svn.ra.get_latest_revnum(self.ra)
+ stream = self._getlog([self.tags], start, self.startrev)
+ try:
+ for entry in stream:
+ origpaths, revnum, author, date, message = entry
+ copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
+ in origpaths.iteritems() if e.copyfrom_path]
+ # Apply moves/copies from more specific to general
+ copies.sort(reverse=True)
+
+ srctagspath = tagspath
+ if copies and copies[-1][2] == tagspath:
+ # Track tags directory moves
+ srctagspath = copies.pop()[0]
+
+ for source, sourcerev, dest in copies:
+ if not dest.startswith(tagspath + '/'):
+ continue
+ for tag in pendings:
+ if tag[0].startswith(dest):
+ tagpath = source + tag[0][len(dest):]
+ tag[:2] = [tagpath, sourcerev]
+ break
+ else:
+ pendings.append([source, sourcerev, dest])
+
+ # Filter out tags with children coming from different
+ # parts of the repository like:
+ # /tags/tag.1 (from /trunk:10)
+ # /tags/tag.1/foo (from /branches/foo:12)
+                # Here /tags/tag.1 is discarded as well as its children.
+ # It happens with tools like cvs2svn. Such tags cannot
+ # be represented in mercurial.
+ addeds = dict((p, e.copyfrom_path) for p, e
+ in origpaths.iteritems()
+ if e.action == 'A' and e.copyfrom_path)
+ badroots = set()
+ for destroot in addeds:
+ for source, sourcerev, dest in pendings:
+ if (not dest.startswith(destroot + '/')
+ or source.startswith(addeds[destroot] + '/')):
+ continue
+ badroots.add(destroot)
+ break
+
+ for badroot in badroots:
+ pendings = [p for p in pendings if p[2] != badroot
+ and not p[2].startswith(badroot + '/')]
+
+ # Tell tag renamings from tag creations
+ renamings = []
+ for source, sourcerev, dest in pendings:
+ tagname = dest.split('/')[-1]
+ if source.startswith(srctagspath):
+ renamings.append([source, sourcerev, tagname])
+ continue
+ if tagname in tags:
+ # Keep the latest tag value
+ continue
+ # From revision may be fake, get one with changes
+ try:
+ tagid = self.latest(source, sourcerev)
+ if tagid and tagname not in tags:
+ tags[tagname] = tagid
+ except SvnPathNotFound:
+ # It happens when we are following directories
+ # we assumed were copied with their parents
+ # but were really created in the tag
+ # directory.
+ pass
+ pendings = renamings
+ tagspath = srctagspath
+ finally:
+ stream.close()
+ return tags
+
+ def converted(self, rev, destrev):
+ if not self.wc:
+ return
+ if self.convertfp is None:
+ self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
+ 'a')
+ self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
+ self.convertfp.flush()
+
+ def revid(self, revnum, module=None):
+ return 'svn:%s%s@%s' % (self.uuid, module or self.module, revnum)
+
+ def revnum(self, rev):
+ return int(rev.split('@')[-1])
+
+ def latest(self, path, stop=None):
+ """Find the latest revid affecting path, up to stop revision
+ number. If stop is None, default to repository latest
+ revision. It may return a revision in a different module,
+ since a branch may be moved without a change being
+ reported. Return None if computed module does not belong to
+ rootmodule subtree.
+ """
+ def findchanges(path, start, stop=None):
+ stream = self._getlog([path], start, stop or 1)
+ try:
+ for entry in stream:
+ paths, revnum, author, date, message = entry
+ if stop is None and paths:
+ # We do not know the latest changed revision,
+ # keep the first one with changed paths.
+ break
+ if revnum <= stop:
+ break
+
+ for p in paths:
+ if (not path.startswith(p) or
+ not paths[p].copyfrom_path):
+ continue
+ newpath = paths[p].copyfrom_path + path[len(p):]
+ self.ui.debug("branch renamed from %s to %s at %d\n" %
+ (path, newpath, revnum))
+ path = newpath
+ break
+ if not paths:
+ revnum = None
+ return revnum, path
+ finally:
+ stream.close()
+
+ if not path.startswith(self.rootmodule):
+ # Requests on foreign branches may be forbidden at server level
+ self.ui.debug('ignoring foreign branch %r\n' % path)
+ return None
+
+ if stop is None:
+ stop = svn.ra.get_latest_revnum(self.ra)
+ try:
+ prevmodule = self.reparent('')
+ dirent = svn.ra.stat(self.ra, path.strip('/'), stop)
+ self.reparent(prevmodule)
+ except SubversionException:
+ dirent = None
+ if not dirent:
+ raise SvnPathNotFound(_('%s not found up to revision %d')
+ % (path, stop))
+
+ # stat() gives us the previous revision on this line of
+ # development, but it might be in *another module*. Fetch the
+ # log and detect renames down to the latest revision.
+ revnum, realpath = findchanges(path, stop, dirent.created_rev)
+ if revnum is None:
+            # Tools like svnsync can create empty revisions, for
+            # instance when synchronizing only a subtree. The
+            # created_rev of such empty revisions keeps its original
+            # value even though all changes have disappeared, and it
+            # can still be returned by ra.stat(), at least when stating
+            # the root module. In that case, do not trust created_rev
+            # and scan the whole history.
+ revnum, realpath = findchanges(path, stop)
+ if revnum is None:
+ self.ui.debug('ignoring empty branch %r\n' % realpath)
+ return None
+
+ if not realpath.startswith(self.rootmodule):
+ self.ui.debug('ignoring foreign branch %r\n' % realpath)
+ return None
+ return self.revid(revnum, realpath)
+
+ def reparent(self, module):
+ """Reparent the svn transport and return the previous parent."""
+ if self.prevmodule == module:
+ return module
+ svnurl = self.baseurl + quote(module)
+ prevmodule = self.prevmodule
+ if prevmodule is None:
+ prevmodule = ''
+ self.ui.debug("reparent to %s\n" % svnurl)
+ svn.ra.reparent(self.ra, svnurl)
+ self.prevmodule = module
+ return prevmodule
+
+ def expandpaths(self, rev, paths, parents):
+ changed, removed = set(), set()
+ copies = {}
+
+ new_module, revnum = revsplit(rev)[1:]
+ if new_module != self.module:
+ self.module = new_module
+ self.reparent(self.module)
+
+ for i, (path, ent) in enumerate(paths):
+ self.ui.progress(_('scanning paths'), i, item=path,
+ total=len(paths))
+ entrypath = self.getrelpath(path)
+
+ kind = self._checkpath(entrypath, revnum)
+ if kind == svn.core.svn_node_file:
+ changed.add(self.recode(entrypath))
+ if not ent.copyfrom_path or not parents:
+ continue
+ # Copy sources not in parent revisions cannot be
+ # represented, ignore their origin for now
+ pmodule, prevnum = revsplit(parents[0])[1:]
+ if ent.copyfrom_rev < prevnum:
+ continue
+ copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
+ if not copyfrom_path:
+ continue
+ self.ui.debug("copied to %s from %s@%s\n" %
+ (entrypath, copyfrom_path, ent.copyfrom_rev))
+ copies[self.recode(entrypath)] = self.recode(copyfrom_path)
+ elif kind == 0: # gone, but had better be a deleted *file*
+ self.ui.debug("gone from %s\n" % ent.copyfrom_rev)
+ pmodule, prevnum = revsplit(parents[0])[1:]
+ parentpath = pmodule + "/" + entrypath
+ fromkind = self._checkpath(entrypath, prevnum, pmodule)
+
+ if fromkind == svn.core.svn_node_file:
+ removed.add(self.recode(entrypath))
+ elif fromkind == svn.core.svn_node_dir:
+ oroot = parentpath.strip('/')
+ nroot = path.strip('/')
+ children = self._iterfiles(oroot, prevnum)
+ for childpath in children:
+ childpath = childpath.replace(oroot, nroot)
+ childpath = self.getrelpath("/" + childpath, pmodule)
+ if childpath:
+ removed.add(self.recode(childpath))
+ else:
+                    self.ui.debug('unknown path in revision %d: %s\n' %
+                                  (revnum, path))
+ elif kind == svn.core.svn_node_dir:
+ if ent.action == 'M':
+ # If the directory just had a prop change,
+ # then we shouldn't need to look for its children.
+ continue
+ if ent.action == 'R' and parents:
+ # If a directory is replacing a file, mark the previous
+ # file as deleted
+ pmodule, prevnum = revsplit(parents[0])[1:]
+ pkind = self._checkpath(entrypath, prevnum, pmodule)
+ if pkind == svn.core.svn_node_file:
+ removed.add(self.recode(entrypath))
+ elif pkind == svn.core.svn_node_dir:
+ # We do not know what files were kept or removed,
+ # mark them all as changed.
+ for childpath in self._iterfiles(pmodule, prevnum):
+ childpath = self.getrelpath("/" + childpath)
+ if childpath:
+ changed.add(self.recode(childpath))
+
+ for childpath in self._iterfiles(path, revnum):
+ childpath = self.getrelpath("/" + childpath)
+ if childpath:
+ changed.add(self.recode(childpath))
+
+ # Handle directory copies
+ if not ent.copyfrom_path or not parents:
+ continue
+ # Copy sources not in parent revisions cannot be
+ # represented, ignore their origin for now
+ pmodule, prevnum = revsplit(parents[0])[1:]
+ if ent.copyfrom_rev < prevnum:
+ continue
+ copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule)
+ if not copyfrompath:
+ continue
+ self.ui.debug("mark %s came from %s:%d\n"
+ % (path, copyfrompath, ent.copyfrom_rev))
+ children = self._iterfiles(ent.copyfrom_path, ent.copyfrom_rev)
+ for childpath in children:
+ childpath = self.getrelpath("/" + childpath, pmodule)
+ if not childpath:
+ continue
+ copytopath = path + childpath[len(copyfrompath):]
+ copytopath = self.getrelpath(copytopath)
+ copies[self.recode(copytopath)] = self.recode(childpath)
+
+ self.ui.progress(_('scanning paths'), None)
+ changed.update(removed)
+ return (list(changed), removed, copies)
+
+ def _fetch_revisions(self, from_revnum, to_revnum):
+ if from_revnum < to_revnum:
+ from_revnum, to_revnum = to_revnum, from_revnum
+
+ self.child_cset = None
+
+ def parselogentry(orig_paths, revnum, author, date, message):
+ """Return the parsed commit object or None, and True if
+ the revision is a branch root.
+ """
+ self.ui.debug("parsing revision %d (%d changes)\n" %
+ (revnum, len(orig_paths)))
+
+ branched = False
+ rev = self.revid(revnum)
+ # branch log might return entries for a parent we already have
+
+ if rev in self.commits or revnum < to_revnum:
+ return None, branched
+
+ parents = []
+ # check whether this revision is the start of a branch or part
+ # of a branch renaming
+ orig_paths = sorted(orig_paths.iteritems())
+ root_paths = [(p, e) for p, e in orig_paths
+ if self.module.startswith(p)]
+ if root_paths:
+ path, ent = root_paths[-1]
+ if ent.copyfrom_path:
+ branched = True
+ newpath = ent.copyfrom_path + self.module[len(path):]
+ # ent.copyfrom_rev may not be the actual last revision
+ previd = self.latest(newpath, ent.copyfrom_rev)
+ if previd is not None:
+ prevmodule, prevnum = revsplit(previd)[1:]
+ if prevnum >= self.startrev:
+ parents = [previd]
+ self.ui.note(
+ _('found parent of branch %s at %d: %s\n') %
+ (self.module, prevnum, prevmodule))
+ else:
+ self.ui.debug("no copyfrom path, don't know what to do.\n")
+
+ paths = []
+ # filter out unrelated paths
+ for path, ent in orig_paths:
+ if self.getrelpath(path) is None:
+ continue
+ paths.append((path, ent))
+
+ # Example SVN datetime. Includes microseconds.
+ # ISO-8601 conformant
+ # '2007-01-04T17:35:00.902377Z'
+ date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])
+
+ log = message and self.recode(message) or ''
+ author = author and self.recode(author) or ''
+ try:
+ branch = self.module.split("/")[-1]
+ if branch == self.trunkname:
+ branch = None
+ except IndexError:
+ branch = None
+
+ cset = commit(author=author,
+ date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
+ desc=log,
+ parents=parents,
+ branch=branch,
+ rev=rev)
+
+ self.commits[rev] = cset
+ # The parents list is *shared* among self.paths and the
+ # commit object. Both will be updated below.
+ self.paths[rev] = (paths, cset.parents)
+ if self.child_cset and not self.child_cset.parents:
+ self.child_cset.parents[:] = [rev]
+ self.child_cset = cset
+ return cset, branched
+
+ self.ui.note(_('fetching revision log for "%s" from %d to %d\n') %
+ (self.module, from_revnum, to_revnum))
+
+ try:
+ firstcset = None
+ lastonbranch = False
+ stream = self._getlog([self.module], from_revnum, to_revnum)
+ try:
+ for entry in stream:
+ paths, revnum, author, date, message = entry
+ if revnum < self.startrev:
+ lastonbranch = True
+ break
+ if not paths:
+ self.ui.debug('revision %d has no entries\n' % revnum)
+ # If we ever leave the loop on an empty
+ # revision, do not try to get a parent branch
+ lastonbranch = lastonbranch or revnum == 0
+ continue
+ cset, lastonbranch = parselogentry(paths, revnum, author,
+ date, message)
+ if cset:
+ firstcset = cset
+ if lastonbranch:
+ break
+ finally:
+ stream.close()
+
+ if not lastonbranch and firstcset and not firstcset.parents:
+ # The first revision of the sequence (the last fetched one)
+ # has invalid parents if not a branch root. Find the parent
+ # revision now, if any.
+ try:
+ firstrevnum = self.revnum(firstcset.rev)
+ if firstrevnum > 1:
+ latest = self.latest(self.module, firstrevnum - 1)
+ if latest:
+ firstcset.parents.append(latest)
+ except SvnPathNotFound:
+ pass
+ except SubversionException, (inst, num):
+ if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
+ raise util.Abort(_('svn: branch has no revision %s')
+ % to_revnum)
+ raise
+
+ def getfile(self, file, rev):
+ # TODO: ra.get_file transmits the whole file instead of diffs.
+ if file in self.removed:
+ raise IOError
+ mode = ''
+ try:
+ new_module, revnum = revsplit(rev)[1:]
+ if self.module != new_module:
+ self.module = new_module
+ self.reparent(self.module)
+ io = StringIO()
+ info = svn.ra.get_file(self.ra, file, revnum, io)
+ data = io.getvalue()
+            # ra.get_file() seems to keep a reference on the input
+            # buffer, preventing collection. Release it explicitly.
+ io.close()
+ if isinstance(info, list):
+ info = info[-1]
+ mode = ("svn:executable" in info) and 'x' or ''
+ mode = ("svn:special" in info) and 'l' or mode
+ except SubversionException, e:
+ notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
+ svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
+ if e.apr_err in notfound: # File not found
+ raise IOError
+ raise
+ if mode == 'l':
+ link_prefix = "link "
+ if data.startswith(link_prefix):
+ data = data[len(link_prefix):]
+ return data, mode
+
+ def _iterfiles(self, path, revnum):
+ """Enumerate all files in path at revnum, recursively."""
+ path = path.strip('/')
+ pool = Pool()
+ rpath = '/'.join([self.baseurl, quote(path)]).strip('/')
+ entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool)
+ if path:
+ path += '/'
+ return ((path + p) for p, e in entries.iteritems()
+ if e.kind == svn.core.svn_node_file)
+
+ def getrelpath(self, path, module=None):
+ if module is None:
+ module = self.module
+ # Given the repository url of this wc, say
+ # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
+ # extract the "entry" portion (a relative path) from what
+ # svn log --xml says, ie
+ # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
+ # that is to say "tests/PloneTestCase.py"
+ if path.startswith(module):
+ relative = path.rstrip('/')[len(module):]
+ if relative.startswith('/'):
+ return relative[1:]
+ elif relative == '':
+ return relative
+
+ # The path is outside our tracked tree...
+ self.ui.debug('%r is not under %r, ignoring\n' % (path, module))
+ return None
+
+ def _checkpath(self, path, revnum, module=None):
+ if module is not None:
+ prevmodule = self.reparent('')
+ path = module + '/' + path
+ try:
+            # ra.check_path does not like leading slashes very much;
+            # they lead to PROPFIND subversion errors
+ return svn.ra.check_path(self.ra, path.strip('/'), revnum)
+ finally:
+ if module is not None:
+ self.reparent(prevmodule)
+
+ def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
+ strict_node_history=False):
+ # Normalize path names, svn >= 1.5 only wants paths relative to
+ # supplied URL
+ relpaths = []
+ for p in paths:
+ if not p.startswith('/'):
+ p = self.module + '/' + p
+ relpaths.append(p.strip('/'))
+ args = [self.baseurl, relpaths, start, end, limit,
+ discover_changed_paths, strict_node_history]
+ arg = encodeargs(args)
+ hgexe = util.hgexecutable()
+ cmd = '%s debugsvnlog' % util.shellquote(hgexe)
+ stdin, stdout = util.popen2(util.quotecommand(cmd))
+ stdin.write(arg)
+ try:
+ stdin.close()
+ except IOError:
+            raise util.Abort(_('Mercurial failed to run itself, check'
+                               ' that the hg executable is in PATH'))
+ return logstream(stdout)
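+    # For illustration: _getlog() re-runs the current hg binary, e.g.
+    # "hg debugsvnlog", writes the marshalled arguments to its stdin,
+    # and logstream() then yields (paths, revnum, author, date, message)
+    # tuples decoded from the child's stdout.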
+
+pre_revprop_change = '''#!/bin/sh
+
+REPOS="$1"
+REV="$2"
+USER="$3"
+PROPNAME="$4"
+ACTION="$5"
+
+if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
+if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
+if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi
+
+echo "Changing prohibited revision property" >&2
+exit 1
+'''
+
+class svn_sink(converter_sink, commandline):
+ commit_re = re.compile(r'Committed revision (\d+).', re.M)
+ uuid_re = re.compile(r'Repository UUID:\s*(\S+)', re.M)
+
+ def prerun(self):
+ if self.wc:
+ os.chdir(self.wc)
+
+ def postrun(self):
+ if self.wc:
+ os.chdir(self.cwd)
+
+ def join(self, name):
+ return os.path.join(self.wc, '.svn', name)
+
+ def revmapfile(self):
+ return self.join('hg-shamap')
+
+ def authorfile(self):
+ return self.join('hg-authormap')
+
+ def __init__(self, ui, path):
+
+ converter_sink.__init__(self, ui, path)
+ commandline.__init__(self, ui, 'svn')
+ self.delete = []
+ self.setexec = []
+ self.delexec = []
+ self.copies = []
+ self.wc = None
+ self.cwd = os.getcwd()
+
+ created = False
+ if os.path.isfile(os.path.join(path, '.svn', 'entries')):
+ self.wc = os.path.realpath(path)
+ self.run0('update')
+ else:
+ if not re.search(r'^(file|http|https|svn|svn\+ssh)\://', path):
+ path = os.path.realpath(path)
+ if os.path.isdir(os.path.dirname(path)):
+ if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
+ ui.status(_('initializing svn repository %r\n') %
+ os.path.basename(path))
+ commandline(ui, 'svnadmin').run0('create', path)
+ created = path
+ path = util.normpath(path)
+ if not path.startswith('/'):
+ path = '/' + path
+ path = 'file://' + path
+
+ wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
+ ui.status(_('initializing svn working copy %r\n')
+ % os.path.basename(wcpath))
+ self.run0('checkout', path, wcpath)
+
+ self.wc = wcpath
+ self.opener = scmutil.opener(self.wc)
+ self.wopener = scmutil.opener(self.wc)
+ self.childmap = mapfile(ui, self.join('hg-childmap'))
+ self.is_exec = util.checkexec(self.wc) and util.isexec or None
+
+ if created:
+ hook = os.path.join(created, 'hooks', 'pre-revprop-change')
+ fp = open(hook, 'w')
+ fp.write(pre_revprop_change)
+ fp.close()
+ util.setflags(hook, False, True)
+
+ output = self.run0('info')
+ self.uuid = self.uuid_re.search(output).group(1).strip()
+
+ def wjoin(self, *names):
+ return os.path.join(self.wc, *names)
+
+ @propertycache
+ def manifest(self):
+ # As of svn 1.7, the "add" command fails when receiving
+ # already tracked entries, so we have to track and filter them
+ # ourselves.
+ m = set()
+ output = self.run0('ls', recursive=True, xml=True)
+ doc = xml.dom.minidom.parseString(output)
+ for e in doc.getElementsByTagName('entry'):
+ for n in e.childNodes:
+ if n.nodeType != n.ELEMENT_NODE or n.tagName != 'name':
+ continue
+ name = ''.join(c.data for c in n.childNodes
+ if c.nodeType == c.TEXT_NODE)
+ # Entries are compared with names coming from
+ # mercurial, so bytes with undefined encoding. Our
+ # best bet is to assume they are in local
+ # encoding. They will be passed to command line calls
+ # later anyway, so they better be.
+ m.add(encoding.tolocal(name.encode('utf-8')))
+ break
+ return m
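+    # For illustration, the "svn ls --xml" output parsed above looks
+    # roughly like this hypothetical excerpt:
+    #   <lists><list path="...">
+    #     <entry kind="file"><name>foo/bar.c</name></entry>
+    #   </list></lists>
+    # Only the text of each <name> element is collected.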
+
+ def putfile(self, filename, flags, data):
+ if 'l' in flags:
+ self.wopener.symlink(data, filename)
+ else:
+ try:
+ if os.path.islink(self.wjoin(filename)):
+ os.unlink(filename)
+ except OSError:
+ pass
+ self.wopener.write(filename, data)
+
+ if self.is_exec:
+ if self.is_exec(self.wjoin(filename)):
+ if 'x' not in flags:
+ self.delexec.append(filename)
+ else:
+ if 'x' in flags:
+ self.setexec.append(filename)
+ util.setflags(self.wjoin(filename), False, 'x' in flags)
+
+ def _copyfile(self, source, dest):
+ # SVN's copy command pukes if the destination file exists, but
+ # our copyfile method expects to record a copy that has
+ # already occurred. Cross the semantic gap.
+ wdest = self.wjoin(dest)
+ exists = os.path.lexists(wdest)
+ if exists:
+ fd, tempname = tempfile.mkstemp(
+ prefix='hg-copy-', dir=os.path.dirname(wdest))
+ os.close(fd)
+ os.unlink(tempname)
+ os.rename(wdest, tempname)
+ try:
+ self.run0('copy', source, dest)
+ finally:
+ self.manifest.add(dest)
+ if exists:
+ try:
+ os.unlink(wdest)
+ except OSError:
+ pass
+ os.rename(tempname, wdest)
+
+ def dirs_of(self, files):
+ dirs = set()
+ for f in files:
+ if os.path.isdir(self.wjoin(f)):
+ dirs.add(f)
+ for i in strutil.rfindall(f, '/'):
+ dirs.add(f[:i])
+ return dirs
+
+ def add_dirs(self, files):
+ add_dirs = [d for d in sorted(self.dirs_of(files))
+ if d not in self.manifest]
+ if add_dirs:
+ self.manifest.update(add_dirs)
+ self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
+ return add_dirs
+
+ def add_files(self, files):
+ files = [f for f in files if f not in self.manifest]
+ if files:
+ self.manifest.update(files)
+ self.xargs(files, 'add', quiet=True)
+ return files
+
+ def tidy_dirs(self, names):
+ deleted = []
+ for d in sorted(self.dirs_of(names), reverse=True):
+ wd = self.wjoin(d)
+            if os.listdir(wd) == ['.svn']:
+ self.run0('delete', d)
+ self.manifest.remove(d)
+ deleted.append(d)
+ return deleted
+
+ def addchild(self, parent, child):
+ self.childmap[parent] = child
+
+ def revid(self, rev):
+ return u"svn:%s@%s" % (self.uuid, rev)
+
+ def putcommit(self, files, copies, parents, commit, source, revmap):
+ for parent in parents:
+ try:
+ return self.revid(self.childmap[parent])
+ except KeyError:
+ pass
+
+ # Apply changes to working copy
+ for f, v in files:
+ try:
+ data, mode = source.getfile(f, v)
+ except IOError:
+ self.delete.append(f)
+ else:
+ self.putfile(f, mode, data)
+ if f in copies:
+ self.copies.append([copies[f], f])
+ files = [f[0] for f in files]
+
+ entries = set(self.delete)
+ files = frozenset(files)
+ entries.update(self.add_dirs(files.difference(entries)))
+ if self.copies:
+ for s, d in self.copies:
+ self._copyfile(s, d)
+ self.copies = []
+ if self.delete:
+ self.xargs(self.delete, 'delete')
+ for f in self.delete:
+ self.manifest.remove(f)
+ self.delete = []
+ entries.update(self.add_files(files.difference(entries)))
+ entries.update(self.tidy_dirs(entries))
+ if self.delexec:
+ self.xargs(self.delexec, 'propdel', 'svn:executable')
+ self.delexec = []
+ if self.setexec:
+ self.xargs(self.setexec, 'propset', 'svn:executable', '*')
+ self.setexec = []
+
+ fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
+ fp = os.fdopen(fd, 'w')
+ fp.write(commit.desc)
+ fp.close()
+ try:
+ output = self.run0('commit',
+ username=util.shortuser(commit.author),
+ file=messagefile,
+ encoding='utf-8')
+ try:
+ rev = self.commit_re.search(output).group(1)
+ except AttributeError:
+ if not files:
+ return parents[0]
+ self.ui.warn(_('unexpected svn output:\n'))
+ self.ui.warn(output)
+ raise util.Abort(_('unable to cope with svn output'))
+ if commit.rev:
+ self.run('propset', 'hg:convert-rev', commit.rev,
+ revprop=True, revision=rev)
+ if commit.branch and commit.branch != 'default':
+ self.run('propset', 'hg:convert-branch', commit.branch,
+ revprop=True, revision=rev)
+ for parent in parents:
+ self.addchild(parent, rev)
+ return self.revid(rev)
+ finally:
+ os.unlink(messagefile)
+
+ def puttags(self, tags):
+ self.ui.warn(_('writing Subversion tags is not yet implemented\n'))
+ return None, None
+
+ def hascommit(self, rev):
+ # This is not correct as one can convert to an existing subversion
+ # repository and childmap would not list all revisions. Too bad.
+ if rev in self.childmap:
+ return True
+ raise util.Abort(_('splice map revision %s not found in subversion '
+ 'child map (revision lookups are not implemented)')
+ % rev)
diff --git a/hgext/convert/transport.py b/hgext/convert/transport.py
new file mode 100644
index 0000000..6a8c565
--- /dev/null
+++ b/hgext/convert/transport.py
@@ -0,0 +1,128 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2007 Daniel Holth <dholth@fastmail.fm>
+# This is a stripped-down version of the original bzr-svn transport.py,
+# Copyright (C) 2006 Jelmer Vernooij <jelmer@samba.org>
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+from mercurial import util
+from svn.core import SubversionException, Pool
+import svn.ra
+import svn.client
+import svn.core
+
+# Some older versions of the Python bindings need to be
+# explicitly initialized. But what we want to do probably
+# won't work worth a darn against those libraries anyway!
+svn.ra.initialize()
+
+svn_config = svn.core.svn_config_get_config(None)
+
+
+def _create_auth_baton(pool):
+ """Create a Subversion authentication baton. """
+ import svn.client
+    # Give the client context baton a suite of authentication
+    # providers.
+ providers = [
+ svn.client.get_simple_provider(pool),
+ svn.client.get_username_provider(pool),
+ svn.client.get_ssl_client_cert_file_provider(pool),
+ svn.client.get_ssl_client_cert_pw_file_provider(pool),
+ svn.client.get_ssl_server_trust_file_provider(pool),
+ ]
+    # Platform-dependent authentication methods
+ getprovider = getattr(svn.core, 'svn_auth_get_platform_specific_provider',
+ None)
+ if getprovider:
+ # Available in svn >= 1.6
+ for name in ('gnome_keyring', 'keychain', 'kwallet', 'windows'):
+ for type in ('simple', 'ssl_client_cert_pw', 'ssl_server_trust'):
+ p = getprovider(name, type, pool)
+ if p:
+ providers.append(p)
+ else:
+ if util.safehasattr(svn.client, 'get_windows_simple_provider'):
+ providers.append(svn.client.get_windows_simple_provider(pool))
+
+ return svn.core.svn_auth_open(providers, pool)
+
+class NotBranchError(SubversionException):
+ pass
+
+class SvnRaTransport(object):
+ """
+ Open an ra connection to a Subversion repository.
+ """
+ def __init__(self, url="", ra=None):
+ self.pool = Pool()
+ self.svn_url = url
+ self.username = ''
+ self.password = ''
+
+        # Only Subversion >= 1.4 has reparent()
+ if ra is None or not util.safehasattr(svn.ra, 'reparent'):
+ self.client = svn.client.create_context(self.pool)
+ ab = _create_auth_baton(self.pool)
+ if False:
+ svn.core.svn_auth_set_parameter(
+ ab, svn.core.SVN_AUTH_PARAM_DEFAULT_USERNAME, self.username)
+ svn.core.svn_auth_set_parameter(
+ ab, svn.core.SVN_AUTH_PARAM_DEFAULT_PASSWORD, self.password)
+ self.client.auth_baton = ab
+ self.client.config = svn_config
+ try:
+ self.ra = svn.client.open_ra_session(
+ self.svn_url,
+ self.client, self.pool)
+ except SubversionException, (inst, num):
+ if num in (svn.core.SVN_ERR_RA_ILLEGAL_URL,
+ svn.core.SVN_ERR_RA_LOCAL_REPOS_OPEN_FAILED,
+ svn.core.SVN_ERR_BAD_URL):
+ raise NotBranchError(url)
+ raise
+ else:
+ self.ra = ra
+ svn.ra.reparent(self.ra, self.svn_url.encode('utf8'))
+
+ class Reporter(object):
+ def __init__(self, reporter_data):
+ self._reporter, self._baton = reporter_data
+
+ def set_path(self, path, revnum, start_empty, lock_token, pool=None):
+ svn.ra.reporter2_invoke_set_path(self._reporter, self._baton,
+ path, revnum, start_empty, lock_token, pool)
+
+ def delete_path(self, path, pool=None):
+ svn.ra.reporter2_invoke_delete_path(self._reporter, self._baton,
+ path, pool)
+
+ def link_path(self, path, url, revision, start_empty, lock_token,
+ pool=None):
+ svn.ra.reporter2_invoke_link_path(self._reporter, self._baton,
+ path, url, revision, start_empty, lock_token,
+ pool)
+
+ def finish_report(self, pool=None):
+ svn.ra.reporter2_invoke_finish_report(self._reporter,
+ self._baton, pool)
+
+ def abort_report(self, pool=None):
+ svn.ra.reporter2_invoke_abort_report(self._reporter,
+ self._baton, pool)
+
+ def do_update(self, revnum, path, *args, **kwargs):
+ return self.Reporter(svn.ra.do_update(self.ra, revnum, path,
+ *args, **kwargs))
diff --git a/hgext/eol.py b/hgext/eol.py
new file mode 100644
index 0000000..951922c
--- /dev/null
+++ b/hgext/eol.py
@@ -0,0 +1,349 @@
+"""automatically manage newlines in repository files
+
+This extension allows you to manage the type of line endings (CRLF or
+LF) that are used in the repository and in the local working
+directory. That way you can get CRLF line endings on Windows and LF on
+Unix/Mac, thereby letting everybody use their OS native line endings.
+
+The extension reads its configuration from a versioned ``.hgeol``
+configuration file found in the root of the working copy. The
+``.hgeol`` file uses the same syntax as all other Mercurial
+configuration files. It uses two sections, ``[patterns]`` and
+``[repository]``.
+
+The ``[patterns]`` section specifies how line endings should be
+converted between the working copy and the repository. The format is
+specified by a file pattern. The first match is used, so put more
+specific patterns first. The available line endings are ``LF``,
+``CRLF``, and ``BIN``.
+
+Files with the declared format of ``CRLF`` or ``LF`` are always
+checked out and stored in the repository in that format and files
+declared to be binary (``BIN``) are left unchanged. Additionally,
+``native`` is an alias for checking out in the platform's default line
+ending: ``LF`` on Unix (including Mac OS X) and ``CRLF`` on
+Windows. Note that ``BIN`` (do nothing to line endings) is Mercurial's
+default behaviour; it is only needed to override a later, more general
+pattern.
+
+The optional ``[repository]`` section specifies the line endings to
+use for files stored in the repository. It has a single setting,
+``native``, which determines the storage line endings for files
+declared as ``native`` in the ``[patterns]`` section. It can be set to
+``LF`` or ``CRLF``. The default is ``LF``. For example, this means
+that on Windows, files configured as ``native`` (``CRLF`` by default)
+will be converted to ``LF`` when stored in the repository. Files
+declared as ``LF``, ``CRLF``, or ``BIN`` in the ``[patterns]`` section
+are always stored as-is in the repository.
+
+Example versioned ``.hgeol`` file::
+
+ [patterns]
+ **.py = native
+ **.vcproj = CRLF
+ **.txt = native
+ Makefile = LF
+ **.jpg = BIN
+
+ [repository]
+ native = LF
+
+.. note::
+ The rules will first apply when files are touched in the working
+ copy, e.g. by updating to null and back to tip to touch all files.
+
+The extension uses an optional ``[eol]`` section read from both the
+normal Mercurial configuration files and the ``.hgeol`` file, with the
+latter overriding the former. You can use that section to control the
+overall behavior. There are three settings:
+
+- ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or
+ ``CRLF`` to override the default interpretation of ``native`` for
+ checkout. This can be used with :hg:`archive` on Unix, say, to
+ generate an archive where files have line endings for Windows.
+
+- ``eol.only-consistent`` (default True) can be set to False to make
+ the extension convert files with inconsistent EOLs. Inconsistent
+ means that there is both ``CRLF`` and ``LF`` present in the file.
+ Such files are normally not touched under the assumption that they
+ have mixed EOLs on purpose.
+
+- ``eol.fix-trailing-newline`` (default False) can be set to True to
+  ensure that converted files end with an EOL character (either ``\\n``
+ or ``\\r\\n`` as per the configured patterns).
+
+The extension provides ``cleverencode:`` and ``cleverdecode:`` filters
+like the deprecated win32text extension does. This means that you can
+disable win32text and enable eol and your filters will still work. You
+only need these filters until you have prepared a ``.hgeol`` file.
+
+The ``win32text.forbid*`` hooks provided by the win32text extension
+have been unified into a single hook named ``eol.checkheadshook``. The
+hook will look up the expected line endings from the ``.hgeol`` file,
+which means you must migrate to a ``.hgeol`` file first before using
+the hook. ``eol.checkheadshook`` only checks heads; intermediate
+invalid revisions will be pushed. To forbid them completely, use the
+``eol.checkallhook`` hook. These hooks are best used as
+``pretxnchangegroup`` hooks.
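+
+For example, a minimal sketch of enabling the head check from an hgrc
+(assuming the extension is available on the repository receiving the
+push)::
+
+  [hooks]
+  pretxnchangegroup.eol = python:hgext.eol.checkheadshook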
+
+See :hg:`help patterns` for more information about the glob patterns
+used.
+"""
+
+from mercurial.i18n import _
+from mercurial import util, config, extensions, match, error
+import re, os
+
+testedwith = 'internal'
+
+# Matches a lone LF, i.e., one that is not part of CRLF.
+singlelf = re.compile('(^|[^\r])\n')
+# Matches a single EOL which can either be a CRLF where repeated CR
+# are removed or a LF. We do not care about old Macintosh files, so a
+# stray CR is an error.
+eolre = re.compile('\r*\n')
+
+
+def inconsistenteol(data):
+ return '\r\n' in data and singlelf.search(data)
+
+def tolf(s, params, ui, **kwargs):
+ """Filter to convert to LF EOLs."""
+ if util.binary(s):
+ return s
+ if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
+ return s
+ if (ui.configbool('eol', 'fix-trailing-newline', False)
+ and s and s[-1] != '\n'):
+ s = s + '\n'
+ return eolre.sub('\n', s)
+
+def tocrlf(s, params, ui, **kwargs):
+ """Filter to convert to CRLF EOLs."""
+ if util.binary(s):
+ return s
+ if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
+ return s
+ if (ui.configbool('eol', 'fix-trailing-newline', False)
+ and s and s[-1] != '\n'):
+ s = s + '\n'
+ return eolre.sub('\r\n', s)
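+
+# A minimal sketch of the filters above (hypothetical values): with the
+# default configuration, tolf('a\r\nb\r\n', None, ui) returns 'a\nb\n'
+# and tocrlf('a\nb\n', None, ui) returns 'a\r\nb\r\n', while binary data
+# and files mixing CRLF and LF EOLs are returned unchanged.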
+
+def isbinary(s, params):
+ """Filter to do nothing with the file."""
+ return s
+
+filters = {
+ 'to-lf': tolf,
+ 'to-crlf': tocrlf,
+ 'is-binary': isbinary,
+ # The following provide backwards compatibility with win32text
+ 'cleverencode:': tolf,
+ 'cleverdecode:': tocrlf
+}
+
+class eolfile(object):
+ def __init__(self, ui, root, data):
+ self._decode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'}
+ self._encode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'}
+
+ self.cfg = config.config()
+ # Our files should not be touched. The pattern must be
+        # inserted first to override a '** = native' pattern.
+ self.cfg.set('patterns', '.hg*', 'BIN')
+ # We can then parse the user's patterns.
+ self.cfg.parse('.hgeol', data)
+
+ isrepolf = self.cfg.get('repository', 'native') != 'CRLF'
+ self._encode['NATIVE'] = isrepolf and 'to-lf' or 'to-crlf'
+ iswdlf = ui.config('eol', 'native', os.linesep) in ('LF', '\n')
+ self._decode['NATIVE'] = iswdlf and 'to-lf' or 'to-crlf'
+
+ include = []
+ exclude = []
+ for pattern, style in self.cfg.items('patterns'):
+ key = style.upper()
+ if key == 'BIN':
+ exclude.append(pattern)
+ else:
+ include.append(pattern)
+ # This will match the files for which we need to care
+ # about inconsistent newlines.
+ self.match = match.match(root, '', [], include, exclude)
+
+ def copytoui(self, ui):
+ for pattern, style in self.cfg.items('patterns'):
+ key = style.upper()
+ try:
+ ui.setconfig('decode', pattern, self._decode[key])
+ ui.setconfig('encode', pattern, self._encode[key])
+ except KeyError:
+ ui.warn(_("ignoring unknown EOL style '%s' from %s\n")
+ % (style, self.cfg.source('patterns', pattern)))
+ # eol.only-consistent can be specified in ~/.hgrc or .hgeol
+ for k, v in self.cfg.items('eol'):
+ ui.setconfig('eol', k, v)
+
+ def checkrev(self, repo, ctx, files):
+ failed = []
+ for f in (files or ctx.files()):
+ if f not in ctx:
+ continue
+ for pattern, style in self.cfg.items('patterns'):
+ if not match.match(repo.root, '', [pattern])(f):
+ continue
+ target = self._encode[style.upper()]
+ data = ctx[f].data()
+ if (target == "to-lf" and "\r\n" in data
+ or target == "to-crlf" and singlelf.search(data)):
+ failed.append((str(ctx), target, f))
+ break
+ return failed
+
+def parseeol(ui, repo, nodes):
+ try:
+ for node in nodes:
+ try:
+ if node is None:
+ # Cannot use workingctx.data() since it would load
+ # and cache the filters before we configure them.
+ data = repo.wfile('.hgeol').read()
+ else:
+ data = repo[node]['.hgeol'].data()
+ return eolfile(ui, repo.root, data)
+ except (IOError, LookupError):
+ pass
+ except error.ParseError, inst:
+ ui.warn(_("warning: ignoring .hgeol file due to parse error "
+ "at %s: %s\n") % (inst.args[1], inst.args[0]))
+ return None
+
+def _checkhook(ui, repo, node, headsonly):
+ # Get revisions to check and touched files at the same time
+ files = set()
+ revs = set()
+ for rev in xrange(repo[node].rev(), len(repo)):
+ revs.add(rev)
+ if headsonly:
+ ctx = repo[rev]
+ files.update(ctx.files())
+ for pctx in ctx.parents():
+ revs.discard(pctx.rev())
+ failed = []
+ for rev in revs:
+ ctx = repo[rev]
+ eol = parseeol(ui, repo, [ctx.node()])
+ if eol:
+ failed.extend(eol.checkrev(repo, ctx, files))
+
+ if failed:
+ eols = {'to-lf': 'CRLF', 'to-crlf': 'LF'}
+ msgs = []
+ for node, target, f in failed:
+ msgs.append(_(" %s in %s should not have %s line endings") %
+ (f, node, eols[target]))
+ raise util.Abort(_("end-of-line check failed:\n") + "\n".join(msgs))
+
+def checkallhook(ui, repo, node, hooktype, **kwargs):
+ """verify that files have expected EOLs"""
+ _checkhook(ui, repo, node, False)
+
+def checkheadshook(ui, repo, node, hooktype, **kwargs):
+ """verify that files have expected EOLs"""
+ _checkhook(ui, repo, node, True)
+
+# "checkheadshook" used to be called "hook"
+hook = checkheadshook
+
+def preupdate(ui, repo, hooktype, parent1, parent2):
+ repo.loadeol([parent1])
+ return False
+
+def uisetup(ui):
+ ui.setconfig('hooks', 'preupdate.eol', preupdate)
+
+def extsetup(ui):
+ try:
+ extensions.find('win32text')
+ ui.warn(_("the eol extension is incompatible with the "
+ "win32text extension\n"))
+ except KeyError:
+ pass
+
+
+def reposetup(ui, repo):
+ uisetup(repo.ui)
+
+ if not repo.local():
+ return
+ for name, fn in filters.iteritems():
+ repo.adddatafilter(name, fn)
+
+ ui.setconfig('patch', 'eol', 'auto')
+
+ class eolrepo(repo.__class__):
+
+ def loadeol(self, nodes):
+ eol = parseeol(self.ui, self, nodes)
+ if eol is None:
+ return None
+ eol.copytoui(self.ui)
+ return eol.match
+
+ def _hgcleardirstate(self):
+ self._eolfile = self.loadeol([None, 'tip'])
+ if not self._eolfile:
+ self._eolfile = util.never
+ return
+
+ try:
+ cachemtime = os.path.getmtime(self.join("eol.cache"))
+ except OSError:
+ cachemtime = 0
+
+ try:
+ eolmtime = os.path.getmtime(self.wjoin(".hgeol"))
+ except OSError:
+ eolmtime = 0
+
+ if eolmtime > cachemtime:
+ ui.debug("eol: detected change in .hgeol\n")
+ wlock = None
+ try:
+ wlock = self.wlock()
+ for f in self.dirstate:
+ if self.dirstate[f] == 'n':
+ # all normal files need to be looked at
+ # again since the new .hgeol file might no
+ # longer match a file it matched before
+ self.dirstate.normallookup(f)
+ # Create or touch the cache to update mtime
+ self.opener("eol.cache", "w").close()
+ wlock.release()
+ except error.LockUnavailable:
+ # If we cannot lock the repository and clear the
+ # dirstate, then a commit might not see all files
+ # as modified. But if we cannot lock the
+ # repository, then we can also not make a commit,
+ # so ignore the error.
+ pass
+
+ def commitctx(self, ctx, error=False):
+ for f in sorted(ctx.added() + ctx.modified()):
+ if not self._eolfile(f):
+ continue
+ try:
+ data = ctx[f].data()
+ except IOError:
+ continue
+ if util.binary(data):
+ # We should not abort here, since the user should
+ # be able to say "** = native" to automatically
+ # have all non-binary files taken care of.
+ continue
+ if inconsistenteol(data):
+                    raise util.Abort(_("inconsistent newline style "
+                                       "in %s\n") % f)
+ return super(eolrepo, self).commitctx(ctx, error)
+ repo.__class__ = eolrepo
+ repo._hgcleardirstate()
diff --git a/hgext/extdiff.py b/hgext/extdiff.py
new file mode 100644
index 0000000..bae60e8
--- /dev/null
+++ b/hgext/extdiff.py
@@ -0,0 +1,331 @@
+# extdiff.py - external diff program support for mercurial
+#
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''command to allow external programs to compare revisions
+
+The extdiff Mercurial extension allows you to use external programs
+to compare revisions, or revision with working directory. The external
+diff programs are called with a configurable set of options and two
+non-option arguments: paths to directories containing snapshots of
+files to compare.
+
+The extdiff extension also allows you to configure new diff commands, so
+you do not always need to type :hg:`extdiff -p kdiff3`. ::
+
+ [extdiff]
+ # add new command that runs GNU diff(1) in 'context diff' mode
+ cdiff = gdiff -Nprc5
+ ## or the old way:
+ #cmd.cdiff = gdiff
+ #opts.cdiff = -Nprc5
+
+ # add new command called vdiff, runs kdiff3
+ vdiff = kdiff3
+
+ # add new command called meld, runs meld (no need to name twice)
+ meld =
+
+ # add new command called vimdiff, runs gvimdiff with DirDiff plugin
+  # (see http://www.vim.org/scripts/script.php?script_id=102).
+  # Non-English users should put "let g:DirDiffDynamicDiffText = 1"
+  # in their .vimrc
+ vimdiff = gvim -f "+next" \\
+ "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
+
+Tool arguments can include variables that are expanded at runtime::
+
+ $parent1, $plabel1 - filename, descriptive label of first parent
+ $child, $clabel - filename, descriptive label of child revision
+ $parent2, $plabel2 - filename, descriptive label of second parent
+ $root - repository root
+ $parent is an alias for $parent1.
+
+The extdiff extension will look in your [diff-tools] and [merge-tools]
+sections for diff tool arguments, when none are specified in [extdiff].
+
+::
+
+ [extdiff]
+ kdiff3 =
+
+ [diff-tools]
+ kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
+
+You can use -I/-X and a list of file or directory names as with the
+normal :hg:`diff` command. The extdiff extension makes snapshots of only
+needed files, so running the external diff program will actually be
+pretty fast (at least faster than having to compare the entire tree).
+'''
+
+from mercurial.i18n import _
+from mercurial.node import short, nullid
+from mercurial import scmutil, util, commands, encoding
+import os, shlex, shutil, tempfile, re
+
+testedwith = 'internal'
+
+def snapshot(ui, repo, files, node, tmproot):
+ '''snapshot files as of some revision
+    if not using snapshots, -I/-X do not work and a recursive diff
+    in tools like kdiff3 and meld displays too many files.'''
+ dirname = os.path.basename(repo.root)
+ if dirname == "":
+ dirname = "root"
+ if node is not None:
+ dirname = '%s.%s' % (dirname, short(node))
+ base = os.path.join(tmproot, dirname)
+ os.mkdir(base)
+ if node is not None:
+ ui.note(_('making snapshot of %d files from rev %s\n') %
+ (len(files), short(node)))
+ else:
+ ui.note(_('making snapshot of %d files from working directory\n') %
+ (len(files)))
+ wopener = scmutil.opener(base)
+ fns_and_mtime = []
+ ctx = repo[node]
+ for fn in files:
+ wfn = util.pconvert(fn)
+ if wfn not in ctx:
+ # File doesn't exist; could be a bogus modify
+ continue
+ ui.note(' %s\n' % wfn)
+ dest = os.path.join(base, wfn)
+ fctx = ctx[wfn]
+ data = repo.wwritedata(wfn, fctx.data())
+ if 'l' in fctx.flags():
+ wopener.symlink(data, wfn)
+ else:
+ wopener.write(wfn, data)
+ if 'x' in fctx.flags():
+ util.setflags(dest, False, True)
+ if node is None:
+ fns_and_mtime.append((dest, repo.wjoin(fn),
+ os.lstat(dest).st_mtime))
+ return dirname, fns_and_mtime
+
+def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
+    '''Do the actual diff:
+
+ - copy to a temp structure if diffing 2 internal revisions
+ - copy to a temp structure if diffing working revision with
+ another one and more than 1 file is changed
+ - just invoke the diff for a single file in the working dir
+ '''
+
+ revs = opts.get('rev')
+ change = opts.get('change')
+ args = ' '.join(diffopts)
+ do3way = '$parent2' in args
+
+ if revs and change:
+ msg = _('cannot specify --rev and --change at the same time')
+ raise util.Abort(msg)
+ elif change:
+ node2 = scmutil.revsingle(repo, change, None).node()
+ node1a, node1b = repo.changelog.parents(node2)
+ else:
+ node1a, node2 = scmutil.revpair(repo, revs)
+ if not revs:
+ node1b = repo.dirstate.p2()
+ else:
+ node1b = nullid
+
+ # Disable 3-way merge if there is only one parent
+ if do3way:
+ if node1b == nullid:
+ do3way = False
+
+ matcher = scmutil.match(repo[node2], pats, opts)
+ mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher)[:3])
+ if do3way:
+ mod_b, add_b, rem_b = map(set, repo.status(node1b, node2, matcher)[:3])
+ else:
+ mod_b, add_b, rem_b = set(), set(), set()
+ modadd = mod_a | add_a | mod_b | add_b
+ common = modadd | rem_a | rem_b
+ if not common:
+ return 0
+
+ tmproot = tempfile.mkdtemp(prefix='extdiff.')
+ try:
+ # Always make a copy of node1a (and node1b, if applicable)
+ dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
+ dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot)[0]
+ rev1a = '@%d' % repo[node1a].rev()
+ if do3way:
+ dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
+ dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot)[0]
+ rev1b = '@%d' % repo[node1b].rev()
+ else:
+ dir1b = None
+ rev1b = ''
+
+ fns_and_mtime = []
+
+        # If node2 is not the wc or there is >1 change, copy it
+ dir2root = ''
+ rev2 = ''
+ if node2:
+ dir2 = snapshot(ui, repo, modadd, node2, tmproot)[0]
+ rev2 = '@%d' % repo[node2].rev()
+ elif len(common) > 1:
+            # we only actually need to get the files to copy back to
+            # the working dir in this case (because the other cases
+            # are: diffing 2 revisions or single file -- in which case
+            # the file is already directly passed to the diff tool).
+ dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot)
+ else:
+ # This lets the diff tool open the changed file directly
+ dir2 = ''
+ dir2root = repo.root
+
+ label1a = rev1a
+ label1b = rev1b
+ label2 = rev2
+
+ # If only one change, diff the files instead of the directories
+ # Handle bogus modifies correctly by checking if the files exist
+ if len(common) == 1:
+ common_file = util.localpath(common.pop())
+ dir1a = os.path.join(tmproot, dir1a, common_file)
+ label1a = common_file + rev1a
+ if not os.path.isfile(dir1a):
+ dir1a = os.devnull
+ if do3way:
+ dir1b = os.path.join(tmproot, dir1b, common_file)
+ label1b = common_file + rev1b
+ if not os.path.isfile(dir1b):
+ dir1b = os.devnull
+ dir2 = os.path.join(dir2root, dir2, common_file)
+ label2 = common_file + rev2
+
+ # Function to quote file/dir names in the argument string.
+ # When not operating in 3-way mode, an empty string is
+ # returned for parent2
+ replace = dict(parent=dir1a, parent1=dir1a, parent2=dir1b,
+ plabel1=label1a, plabel2=label1b,
+ clabel=label2, child=dir2,
+ root=repo.root)
+ def quote(match):
+ key = match.group()[1:]
+ if not do3way and key == 'parent2':
+ return ''
+ return util.shellquote(replace[key])
+
+ # Match parent2 first, so 'parent1?' will match both parent1 and parent
+ regex = '\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)'
+ if not do3way and not re.search(regex, args):
+ args += ' $parent1 $child'
+ args = re.sub(regex, quote, args)
+ cmdline = util.shellquote(diffcmd) + ' ' + args
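+        # For illustration (hypothetical values): with diffcmd 'kdiff3'
+        # and args '$parent $child', cmdline ends up as something like
+        # "kdiff3 repo.1234abcd repo", which is run below with tmproot
+        # as the working directory.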
+
+ ui.debug('running %r in %s\n' % (cmdline, tmproot))
+ util.system(cmdline, cwd=tmproot, out=ui.fout)
+
+ for copy_fn, working_fn, mtime in fns_and_mtime:
+ if os.lstat(copy_fn).st_mtime != mtime:
+ ui.debug('file changed while diffing. '
+ 'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
+ util.copyfile(copy_fn, working_fn)
+
+ return 1
+ finally:
+ ui.note(_('cleaning up temp directory\n'))
+ shutil.rmtree(tmproot)
+
+def extdiff(ui, repo, *pats, **opts):
+ '''use external program to diff repository (or selected files)
+
+ Show differences between revisions for the specified files, using
+ an external program. The default program used is diff, with
+ default options "-Npru".
+
+ To select a different program, use the -p/--program option. The
+ program will be passed the names of two directories to compare. To
+ pass additional options to the program, use -o/--option. These
+ will be passed before the names of the directories to compare.
+
+ When two revision arguments are given, then changes are shown
+ between those revisions. If only one revision is specified then
+ that revision is compared to the working directory, and, when no
+ revisions are specified, the working directory files are compared
+ to its parent.'''
+ program = opts.get('program')
+ option = opts.get('option')
+ if not program:
+ program = 'diff'
+ option = option or ['-Npru']
+ return dodiff(ui, repo, program, option, pats, opts)
+
+cmdtable = {
+ "extdiff":
+ (extdiff,
+ [('p', 'program', '',
+ _('comparison program to run'), _('CMD')),
+ ('o', 'option', [],
+ _('pass option to comparison program'), _('OPT')),
+ ('r', 'rev', [],
+ _('revision'), _('REV')),
+ ('c', 'change', '',
+ _('change made by revision'), _('REV')),
+ ] + commands.walkopts,
+ _('hg extdiff [OPT]... [FILE]...')),
+ }
+
+def uisetup(ui):
+ for cmd, path in ui.configitems('extdiff'):
+ if cmd.startswith('cmd.'):
+ cmd = cmd[4:]
+ if not path:
+ path = cmd
+ diffopts = ui.config('extdiff', 'opts.' + cmd, '')
+ diffopts = diffopts and [diffopts] or []
+ elif cmd.startswith('opts.'):
+ continue
+ else:
+ # command = path opts
+ if path:
+ diffopts = shlex.split(path)
+ path = diffopts.pop(0)
+ else:
+ path, diffopts = cmd, []
+ # look for diff arguments in [diff-tools] then [merge-tools]
+ if diffopts == []:
+ args = ui.config('diff-tools', cmd+'.diffargs') or \
+ ui.config('merge-tools', cmd+'.diffargs')
+ if args:
+ diffopts = shlex.split(args)
+ def save(cmd, path, diffopts):
+ '''use closure to save diff command to use'''
+ def mydiff(ui, repo, *pats, **opts):
+ return dodiff(ui, repo, path, diffopts + opts['option'],
+ pats, opts)
+ doc = _('''\
+use %(path)s to diff repository (or selected files)
+
+ Show differences between revisions for the specified files, using
+ the %(path)s program.
+
+ When two revision arguments are given, then changes are shown
+ between those revisions. If only one revision is specified then
+ that revision is compared to the working directory, and, when no
+ revisions are specified, the working directory files are compared
+ to its parent.\
+''') % dict(path=util.uirepr(path))
+
+ # We must translate the docstring right away since it is
+ # used as a format string. The string will unfortunately
+ # be translated again in commands.helpcmd and this will
+ # fail when the docstring contains non-ASCII characters.
+ # Decoding the string to a Unicode string here (using the
+ # right encoding) prevents that.
+ mydiff.__doc__ = doc.decode(encoding.encoding)
+ return mydiff
+ cmdtable[cmd] = (save(cmd, path, diffopts),
+ cmdtable['extdiff'][1][1:],
+ _('hg %s [OPTION]... [FILE]...') % cmd)
diff --git a/hgext/factotum.py b/hgext/factotum.py
new file mode 100644
index 0000000..098c5a2
--- /dev/null
+++ b/hgext/factotum.py
@@ -0,0 +1,120 @@
+# factotum.py - Plan 9 factotum integration for Mercurial
+#
+# Copyright (C) 2012 Steven Stallion <sstallion@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+'''http authentication with factotum
+
+This extension allows the factotum(4) facility on Plan 9 from Bell Labs
+platforms to provide authentication information for HTTP access. Configuration
+entries specified in the auth section as well as authentication information
+provided in the repository URL are fully supported. If no prefix is specified,
+a value of "*" will be assumed.
+
+By default, keys are specified as::
+
+ proto=pass service=hg prefix=<prefix> user=<username> !password=<password>
+
+If the factotum extension is unable to read the required key, one will be
+requested interactively.
+
+A configuration section is available to customize runtime behavior. By
+default, these entries are::
+
+ [factotum]
+ executable = /bin/auth/factotum
+ mountpoint = /mnt/factotum
+ service = hg
+
+The executable entry defines the full path to the factotum binary. The
+mountpoint entry defines the path to the factotum file service. Lastly, the
+service entry controls the service name used when reading keys.
+
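+A matching ``[auth]`` entry in an hgrc might look like this (values
+hypothetical)::
+
+  [auth]
+  example.prefix = hg.example.org
+  example.username = glenda
+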
+'''
+
+from mercurial.i18n import _
+from mercurial.url import passwordmgr
+from mercurial import httpconnection, urllib2, util
+import os
+
+ERRMAX = 128
+
+def auth_getkey(self, params):
+ if not self.ui.interactive():
+ raise util.Abort(_('factotum not interactive'))
+ if 'user=' not in params:
+ params = '%s user?' % params
+ params = '%s !password?' % params
+ os.system("%s -g '%s'" % (_executable, params))
+
+def auth_getuserpasswd(self, getkey, params):
+ params = 'proto=pass %s' % params
+ while True:
+ fd = os.open('%s/rpc' % _mountpoint, os.O_RDWR)
+ try:
+ try:
+ os.write(fd, 'start %s' % params)
+ l = os.read(fd, ERRMAX).split()
+ if l[0] == 'ok':
+ os.write(fd, 'read')
+ l = os.read(fd, ERRMAX).split()
+ if l[0] == 'ok':
+ return l[1:]
+ except (OSError, IOError):
+ raise util.Abort(_('factotum not responding'))
+ finally:
+ os.close(fd)
+ getkey(self, params)
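+
+# For illustration, the rpc exchange performed above follows
+# factotum(4)'s protocol (hypothetical transcript):
+#   -> start proto=pass service=hg prefix=*
+#   <- ok
+#   -> read
+#   <- ok glenda secret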
+
+def monkeypatch_method(cls):
+ def decorator(func):
+ setattr(cls, func.__name__, func)
+ return func
+ return decorator
+
+@monkeypatch_method(passwordmgr)
+def find_user_password(self, realm, authuri):
+ user, passwd = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
+ self, realm, authuri)
+ if user and passwd:
+ self._writedebug(user, passwd)
+ return (user, passwd)
+
+ prefix = ''
+ res = httpconnection.readauthforuri(self.ui, authuri, user)
+ if res:
+ _, auth = res
+ prefix = auth.get('prefix')
+ user, passwd = auth.get('username'), auth.get('password')
+ if not user or not passwd:
+ if not prefix:
+ prefix = '*'
+ params = 'service=%s prefix=%s' % (_service, prefix)
+ if user:
+ params = '%s user=%s' % (params, user)
+ user, passwd = auth_getuserpasswd(self, auth_getkey, params)
+
+ self.add_password(realm, authuri, user, passwd)
+ self._writedebug(user, passwd)
+ return (user, passwd)
+
+def uisetup(ui):
+ global _executable
+ _executable = ui.config('factotum', 'executable', '/bin/auth/factotum')
+ global _mountpoint
+ _mountpoint = ui.config('factotum', 'mountpoint', '/mnt/factotum')
+ global _service
+ _service = ui.config('factotum', 'service', 'hg')
diff --git a/hgext/fetch.py b/hgext/fetch.py
new file mode 100644
index 0000000..491d8b2
--- /dev/null
+++ b/hgext/fetch.py
@@ -0,0 +1,158 @@
+# fetch.py - pull and merge remote changes
+#
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''pull, update and merge in one command (DEPRECATED)'''
+
+from mercurial.i18n import _
+from mercurial.node import nullid, short
+from mercurial import commands, cmdutil, hg, util, error
+from mercurial.lock import release
+
+testedwith = 'internal'
+
+def fetch(ui, repo, source='default', **opts):
+ '''pull changes from a remote repository, merge new changes if needed.
+
+ This finds all changes from the repository at the specified path
+ or URL and adds them to the local repository.
+
+ If the pulled changes add a new branch head, the head is
+ automatically merged, and the result of the merge is committed.
+ Otherwise, the working directory is updated to include the new
+ changes.
+
+ When a merge is needed, the working directory is first updated to
+ the newly pulled changes. Local changes are then merged into the
+ pulled changes. To switch the merge order, use --switch-parent.
+
+ See :hg:`help dates` for a list of formats valid for -d/--date.
+
+ Returns 0 on success.
+ '''
+
+ date = opts.get('date')
+ if date:
+ opts['date'] = util.parsedate(date)
+
+ parent, p2 = repo.dirstate.parents()
+ branch = repo.dirstate.branch()
+ try:
+ branchnode = repo.branchtip(branch)
+ except error.RepoLookupError:
+ branchnode = None
+ if parent != branchnode:
+ raise util.Abort(_('working dir not at branch tip '
+ '(use "hg update" to check out branch tip)'))
+
+ if p2 != nullid:
+ raise util.Abort(_('outstanding uncommitted merge'))
+
+ wlock = lock = None
+ try:
+ wlock = repo.wlock()
+ lock = repo.lock()
+ mod, add, rem, del_ = repo.status()[:4]
+
+ if mod or add or rem:
+ raise util.Abort(_('outstanding uncommitted changes'))
+ if del_:
+ raise util.Abort(_('working directory is missing some files'))
+ bheads = repo.branchheads(branch)
+ bheads = [head for head in bheads if len(repo[head].children()) == 0]
+ if len(bheads) > 1:
+ raise util.Abort(_('multiple heads in this branch '
+ '(use "hg heads ." and "hg merge" to merge)'))
+
+ other = hg.peer(repo, opts, ui.expandpath(source))
+ ui.status(_('pulling from %s\n') %
+ util.hidepassword(ui.expandpath(source)))
+ revs = None
+ if opts['rev']:
+ try:
+ revs = [other.lookup(rev) for rev in opts['rev']]
+ except error.CapabilityError:
+ err = _("other repository doesn't support revision lookup, "
+ "so a rev cannot be specified.")
+ raise util.Abort(err)
+
+ # Are there any changes at all?
+ modheads = repo.pull(other, heads=revs)
+ if modheads == 0:
+ return 0
+
+ # Is this a simple fast-forward along the current branch?
+ newheads = repo.branchheads(branch)
+ newchildren = repo.changelog.nodesbetween([parent], newheads)[2]
+ if len(newheads) == 1 and len(newchildren):
+ if newchildren[0] != parent:
+ return hg.update(repo, newchildren[0])
+ else:
+ return 0
+
+        # Is there more than one additional branch head?
+ newchildren = [n for n in newchildren if n != parent]
+ newparent = parent
+ if newchildren:
+ newparent = newchildren[0]
+ hg.clean(repo, newparent)
+ newheads = [n for n in newheads if n != newparent]
+ if len(newheads) > 1:
+ ui.status(_('not merging with %d other new branch heads '
+ '(use "hg heads ." and "hg merge" to merge them)\n') %
+ (len(newheads) - 1))
+ return 1
+
+ if not newheads:
+ return 0
+
+ # Otherwise, let's merge.
+ err = False
+ if newheads:
+ # By default, we consider the repository we're pulling
+ # *from* as authoritative, so we merge our changes into
+ # theirs.
+ if opts['switch_parent']:
+ firstparent, secondparent = newparent, newheads[0]
+ else:
+ firstparent, secondparent = newheads[0], newparent
+ ui.status(_('updating to %d:%s\n') %
+ (repo.changelog.rev(firstparent),
+ short(firstparent)))
+ hg.clean(repo, firstparent)
+ ui.status(_('merging with %d:%s\n') %
+ (repo.changelog.rev(secondparent), short(secondparent)))
+ err = hg.merge(repo, secondparent, remind=False)
+
+ if not err:
+ # we don't translate commit messages
+ message = (cmdutil.logmessage(ui, opts) or
+ ('Automated merge with %s' %
+ util.removeauth(other.url())))
+ editor = cmdutil.commiteditor
+ if opts.get('force_editor') or opts.get('edit'):
+ editor = cmdutil.commitforceeditor
+ n = repo.commit(message, opts['user'], opts['date'], editor=editor)
+ ui.status(_('new changeset %d:%s merges remote changes '
+ 'with local\n') % (repo.changelog.rev(n),
+ short(n)))
+
+ return err
+
+ finally:
+ release(lock, wlock)
+
+cmdtable = {
+ 'fetch':
+ (fetch,
+ [('r', 'rev', [],
+ _('a specific revision you would like to pull'), _('REV')),
+ ('e', 'edit', None, _('edit commit message')),
+ ('', 'force-editor', None, _('edit commit message (DEPRECATED)')),
+ ('', 'switch-parent', None, _('switch parents when merging')),
+ ] + commands.commitopts + commands.commitopts2 + commands.remoteopts,
+ _('hg fetch [SOURCE]')),
+}
diff --git a/hgext/gpg.py b/hgext/gpg.py
new file mode 100644
index 0000000..2ded54c
--- /dev/null
+++ b/hgext/gpg.py
@@ -0,0 +1,289 @@
+# Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''commands to sign and verify changesets'''
+
+import os, tempfile, binascii
+from mercurial import util, commands, match, cmdutil
+from mercurial import node as hgnode
+from mercurial.i18n import _
+
+cmdtable = {}
+command = cmdutil.command(cmdtable)
+testedwith = 'internal'
+
+class gpg(object):
+ def __init__(self, path, key=None):
+ self.path = path
+ self.key = (key and " --local-user \"%s\"" % key) or ""
+
+ def sign(self, data):
+ gpgcmd = "%s --sign --detach-sign%s" % (self.path, self.key)
+ return util.filter(data, gpgcmd)
+
+ def verify(self, data, sig):
+ """ returns of the good and bad signatures"""
+ sigfile = datafile = None
+ try:
+ # create temporary files
+ fd, sigfile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".sig")
+ fp = os.fdopen(fd, 'wb')
+ fp.write(sig)
+ fp.close()
+ fd, datafile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".txt")
+ fp = os.fdopen(fd, 'wb')
+ fp.write(data)
+ fp.close()
+ gpgcmd = ("%s --logger-fd 1 --status-fd 1 --verify "
+ "\"%s\" \"%s\"" % (self.path, sigfile, datafile))
+ ret = util.filter("", gpgcmd)
+ finally:
+ for f in (sigfile, datafile):
+ try:
+ if f:
+ os.unlink(f)
+ except OSError:
+ pass
+ keys = []
+ key, fingerprint = None, None
+ err = ""
+ for l in ret.splitlines():
+ # see DETAILS in the gnupg documentation
+ # filter the logger output
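+            # typical status lines look like (illustrative values):
+            #   [GNUPG:] GOODSIG 0123456789ABCDEF John Doe <john@example.com>
+            #   [GNUPG:] VALIDSIG ... (the 11th field, used below, is the
+            #   primary key fingerprint)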
+ if not l.startswith("[GNUPG:]"):
+ continue
+ l = l[9:]
+ if l.startswith("ERRSIG"):
+ err = _("error while verifying signature")
+ break
+ elif l.startswith("VALIDSIG"):
+ # fingerprint of the primary key
+ fingerprint = l.split()[10]
+ elif (l.startswith("GOODSIG") or
+ l.startswith("EXPSIG") or
+ l.startswith("EXPKEYSIG") or
+ l.startswith("BADSIG")):
+ if key is not None:
+ keys.append(key + [fingerprint])
+ key = l.split(" ", 2)
+ fingerprint = None
+ if err:
+ return err, []
+ if key is not None:
+ keys.append(key + [fingerprint])
+ return err, keys
+
+def newgpg(ui, **opts):
+ """create a new gpg instance"""
+ gpgpath = ui.config("gpg", "cmd", "gpg")
+ gpgkey = opts.get('key')
+ if not gpgkey:
+ gpgkey = ui.config("gpg", "key", None)
+ return gpg(gpgpath, gpgkey)
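+# Both the binary and the default key come from hgrc, for example
+# (illustrative values):
+#   [gpg]
+#   cmd = gpg2
+#   key = YOUR-KEY-ID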
+
+def sigwalk(repo):
+ """
+    walk over every signature, yielding a pair
+ ((node, version, sig), (filename, linenumber))
+ """
+ def parsefile(fileiter, context):
+ ln = 1
+ for l in fileiter:
+ if not l:
+ continue
+ yield (l.split(" ", 2), (context, ln))
+ ln += 1
+
+ # read the heads
+ fl = repo.file(".hgsigs")
+ for r in reversed(fl.heads()):
+ fn = ".hgsigs|%s" % hgnode.short(r)
+ for item in parsefile(fl.read(r).splitlines(), fn):
+ yield item
+ try:
+ # read local signatures
+ fn = "localsigs"
+ for item in parsefile(repo.opener(fn), fn):
+ yield item
+ except IOError:
+ pass
+
+def getkeys(ui, repo, mygpg, sigdata, context):
+ """get the keys who signed a data"""
+ fn, ln = context
+ node, version, sig = sigdata
+ prefix = "%s:%d" % (fn, ln)
+ node = hgnode.bin(node)
+
+ data = node2txt(repo, node, version)
+ sig = binascii.a2b_base64(sig)
+ err, keys = mygpg.verify(data, sig)
+ if err:
+ ui.warn("%s:%d %s\n" % (fn, ln , err))
+ return None
+
+ validkeys = []
+ # warn for expired key and/or sigs
+ for key in keys:
+ if key[0] == "BADSIG":
+ ui.write(_("%s Bad signature from \"%s\"\n") % (prefix, key[2]))
+ continue
+ if key[0] == "EXPSIG":
+ ui.write(_("%s Note: Signature has expired"
+ " (signed by: \"%s\")\n") % (prefix, key[2]))
+ elif key[0] == "EXPKEYSIG":
+ ui.write(_("%s Note: This key has expired"
+ " (signed by: \"%s\")\n") % (prefix, key[2]))
+ validkeys.append((key[1], key[2], key[3]))
+ return validkeys
+
+@command("sigs", [], _('hg sigs'))
+def sigs(ui, repo):
+ """list signed changesets"""
+ mygpg = newgpg(ui)
+ revs = {}
+
+ for data, context in sigwalk(repo):
+ node, version, sig = data
+ fn, ln = context
+ try:
+ n = repo.lookup(node)
+ except KeyError:
+ ui.warn(_("%s:%d node does not exist\n") % (fn, ln))
+ continue
+ r = repo.changelog.rev(n)
+ keys = getkeys(ui, repo, mygpg, data, context)
+ if not keys:
+ continue
+ revs.setdefault(r, [])
+ revs[r].extend(keys)
+ for rev in sorted(revs, reverse=True):
+ for k in revs[rev]:
+ r = "%5d:%s" % (rev, hgnode.hex(repo.changelog.node(rev)))
+ ui.write("%-30s %s\n" % (keystr(ui, k), r))
+
+@command("sigcheck", [], _('hg sigcheck REV'))
+def check(ui, repo, rev):
+ """verify all the signatures there may be for a particular revision"""
+ mygpg = newgpg(ui)
+ rev = repo.lookup(rev)
+ hexrev = hgnode.hex(rev)
+ keys = []
+
+ for data, context in sigwalk(repo):
+ node, version, sig = data
+ if node == hexrev:
+ k = getkeys(ui, repo, mygpg, data, context)
+ if k:
+ keys.extend(k)
+
+ if not keys:
+ ui.write(_("no valid signature for %s\n") % hgnode.short(rev))
+ return
+
+ # print summary
+ ui.write("%s is signed by:\n" % hgnode.short(rev))
+ for key in keys:
+ ui.write(" %s\n" % keystr(ui, key))
+
+def keystr(ui, key):
+ """associate a string to a key (username, comment)"""
+ keyid, user, fingerprint = key
+ comment = ui.config("gpg", fingerprint, None)
+ if comment:
+ return "%s (%s)" % (user, comment)
+ else:
+ return user
+
+@command("sign",
+ [('l', 'local', None, _('make the signature local')),
+ ('f', 'force', None, _('sign even if the sigfile is modified')),
+ ('', 'no-commit', None, _('do not commit the sigfile after signing')),
+ ('k', 'key', '',
+ _('the key id to sign with'), _('ID')),
+ ('m', 'message', '',
+ _('commit message'), _('TEXT')),
+ ] + commands.commitopts2,
+ _('hg sign [OPTION]... [REV]...'))
+def sign(ui, repo, *revs, **opts):
+ """add a signature for the current or given revision
+
+ If no revision is given, the parent of the working directory is used,
+ or tip if no revision is checked out.
+
+ See :hg:`help dates` for a list of formats valid for -d/--date.
+ """
+
+ mygpg = newgpg(ui, **opts)
+ sigver = "0"
+ sigmessage = ""
+
+ date = opts.get('date')
+ if date:
+ opts['date'] = util.parsedate(date)
+
+ if revs:
+ nodes = [repo.lookup(n) for n in revs]
+ else:
+ nodes = [node for node in repo.dirstate.parents()
+ if node != hgnode.nullid]
+ if len(nodes) > 1:
+ raise util.Abort(_('uncommitted merge - please provide a '
+ 'specific revision'))
+ if not nodes:
+ nodes = [repo.changelog.tip()]
+
+ for n in nodes:
+ hexnode = hgnode.hex(n)
+ ui.write(_("signing %d:%s\n") % (repo.changelog.rev(n),
+ hgnode.short(n)))
+ # build data
+ data = node2txt(repo, n, sigver)
+ sig = mygpg.sign(data)
+ if not sig:
+ raise util.Abort(_("error while signing"))
+ sig = binascii.b2a_base64(sig)
+ sig = sig.replace("\n", "")
+ sigmessage += "%s %s %s\n" % (hexnode, sigver, sig)
+
+ # write it
+ if opts['local']:
+ repo.opener.append("localsigs", sigmessage)
+ return
+
+ msigs = match.exact(repo.root, '', ['.hgsigs'])
+ s = repo.status(match=msigs, unknown=True, ignored=True)[:6]
+ if util.any(s) and not opts["force"]:
+ raise util.Abort(_("working copy of .hgsigs is changed "
+ "(please commit .hgsigs manually "
+ "or use --force)"))
+
+ sigsfile = repo.wfile(".hgsigs", "ab")
+ sigsfile.write(sigmessage)
+ sigsfile.close()
+
+ if '.hgsigs' not in repo.dirstate:
+ repo[None].add([".hgsigs"])
+
+ if opts["no_commit"]:
+ return
+
+ message = opts['message']
+ if not message:
+ # we don't translate commit messages
+ message = "\n".join(["Added signature for changeset %s"
+ % hgnode.short(n)
+ for n in nodes])
+ try:
+ repo.commit(message, opts['user'], opts['date'], match=msigs)
+ except ValueError, inst:
+ raise util.Abort(str(inst))
+
+def node2txt(repo, node, ver):
+ """map a manifest into some text"""
+ if ver == "0":
+ return "%s\n" % hgnode.hex(node)
+ else:
+ raise util.Abort(_("unknown signature version"))
diff --git a/hgext/graphlog.py b/hgext/graphlog.py
new file mode 100644
index 0000000..9caed24
--- /dev/null
+++ b/hgext/graphlog.py
@@ -0,0 +1,54 @@
+# ASCII graph log extension for Mercurial
+#
+# Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''command to view revision graphs from a shell
+
+This extension adds a --graph option to the incoming, outgoing and log
+commands. When this option is given, an ASCII representation of the
+revision graph is also shown.
+'''
+
+from mercurial.i18n import _
+from mercurial import cmdutil, commands
+
+cmdtable = {}
+command = cmdutil.command(cmdtable)
+testedwith = 'internal'
+
+@command('glog',
+ [('f', 'follow', None,
+ _('follow changeset history, or file history across copies and renames')),
+ ('', 'follow-first', None,
+ _('only follow the first parent of merge changesets (DEPRECATED)')),
+ ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
+ ('C', 'copies', None, _('show copied files')),
+ ('k', 'keyword', [],
+ _('do case-insensitive search for a given text'), _('TEXT')),
+ ('r', 'rev', [], _('show the specified revision or range'), _('REV')),
+ ('', 'removed', None, _('include revisions where files were removed')),
+ ('m', 'only-merges', None, _('show only merges (DEPRECATED)')),
+ ('u', 'user', [], _('revisions committed by user'), _('USER')),
+ ('', 'only-branch', [],
+ _('show only changesets within the given named branch (DEPRECATED)'),
+ _('BRANCH')),
+ ('b', 'branch', [],
+ _('show changesets within the given named branch'), _('BRANCH')),
+ ('P', 'prune', [],
+ _('do not display revision or any of its ancestors'), _('REV')),
+ ('', 'hidden', False, _('show hidden changesets (DEPRECATED)')),
+ ] + commands.logopts + commands.walkopts,
+ _('[OPTION]... [FILE]'))
+def graphlog(ui, repo, *pats, **opts):
+ """show revision history alongside an ASCII revision graph
+
+ Print a revision history alongside a revision graph drawn with
+ ASCII characters.
+
+ Nodes printed as an @ character are parents of the working
+ directory.
+ """
+ return cmdutil.graphlog(ui, repo, *pats, **opts)
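+# Example invocations (illustrative):
+#   hg glog -l 10      # ASCII graph of the last ten revisions
+#   hg log --graph     # same output via the option this extension adds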
diff --git a/hgext/hgcia.py b/hgext/hgcia.py
new file mode 100644
index 0000000..075840a
--- /dev/null
+++ b/hgext/hgcia.py
@@ -0,0 +1,277 @@
+# Copyright (C) 2007-8 Brendan Cully <brendan@kublai.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""hooks for integrating with the CIA.vc notification service
+
+This is meant to be run as a changegroup or incoming hook. To
+configure it, set the following options in your hgrc::
+
+ [cia]
+ # your registered CIA user name
+ user = foo
+ # the name of the project in CIA
+ project = foo
+ # the module (subproject) (optional)
+ #module = foo
+ # Append a diffstat to the log message (optional)
+ #diffstat = False
+ # Template to use for log messages (optional)
+ #template = {desc}\\n{baseurl}{webroot}/rev/{node}-- {diffstat}
+ # Style to use (optional)
+ #style = foo
+ # The URL of the CIA notification service (optional)
+ # You can use mailto: URLs to send by email, eg
+ # mailto:cia@cia.vc
+ # Make sure to set email.from if you do this.
+ #url = http://cia.vc/
+ # print message instead of sending it (optional)
+ #test = False
+ # number of slashes to strip for url paths
+ #strip = 0
+
+ [hooks]
+ # one of these:
+ changegroup.cia = python:hgcia.hook
+ #incoming.cia = python:hgcia.hook
+
+ [web]
+ # If you want hyperlinks (optional)
+ baseurl = http://server/path/to/repo
+"""
+
+from mercurial.i18n import _
+from mercurial.node import bin, short
+from mercurial import cmdutil, patch, templater, util, mail
+import email.Parser
+
+import socket, xmlrpclib
+from xml.sax import saxutils
+testedwith = 'internal'
+
+socket_timeout = 30 # seconds
+if util.safehasattr(socket, 'setdefaulttimeout'):
+    # set a timeout for the socket so you don't have to wait so long
+ # when cia.vc is having problems. requires python >= 2.3:
+ socket.setdefaulttimeout(socket_timeout)
+
+HGCIA_VERSION = '0.1'
+HGCIA_URL = 'http://hg.kublai.com/mercurial/hgcia'
+
+
+class ciamsg(object):
+ """ A CIA message """
+ def __init__(self, cia, ctx):
+ self.cia = cia
+ self.ctx = ctx
+ self.url = self.cia.url
+ if self.url:
+ self.url += self.cia.root
+
+ def fileelem(self, path, uri, action):
+ if uri:
+ uri = ' uri=%s' % saxutils.quoteattr(uri)
+ return '<file%s action=%s>%s</file>' % (
+ uri, saxutils.quoteattr(action), saxutils.escape(path))
+
+ def fileelems(self):
+ n = self.ctx.node()
+ f = self.cia.repo.status(self.ctx.p1().node(), n)
+ url = self.url or ''
+ if url and url[-1] == '/':
+ url = url[:-1]
+ elems = []
+ for path in f[0]:
+ uri = '%s/diff/%s/%s' % (url, short(n), path)
+ elems.append(self.fileelem(path, url and uri, 'modify'))
+ for path in f[1]:
+ # TODO: copy/rename ?
+ uri = '%s/file/%s/%s' % (url, short(n), path)
+ elems.append(self.fileelem(path, url and uri, 'add'))
+ for path in f[2]:
+ elems.append(self.fileelem(path, '', 'remove'))
+
+ return '\n'.join(elems)
+
+ def sourceelem(self, project, module=None, branch=None):
+ msg = ['<source>', '<project>%s</project>' % saxutils.escape(project)]
+ if module:
+ msg.append('<module>%s</module>' % saxutils.escape(module))
+ if branch:
+ msg.append('<branch>%s</branch>' % saxutils.escape(branch))
+ msg.append('</source>')
+
+ return '\n'.join(msg)
+
+ def diffstat(self):
+ class patchbuf(object):
+ def __init__(self):
+ self.lines = []
+                # give the buffer a name; the export/diffstat plumbing expects one
+ self.name = 'cia'
+ def write(self, data):
+ self.lines += data.splitlines(True)
+ def close(self):
+ pass
+
+ n = self.ctx.node()
+ pbuf = patchbuf()
+ cmdutil.export(self.cia.repo, [n], fp=pbuf)
+ return patch.diffstat(pbuf.lines) or ''
+
+ def logmsg(self):
+ diffstat = self.cia.diffstat and self.diffstat() or ''
+ self.cia.ui.pushbuffer()
+ self.cia.templater.show(self.ctx, changes=self.ctx.changeset(),
+ baseurl=self.cia.ui.config('web', 'baseurl'),
+ url=self.url, diffstat=diffstat,
+ webroot=self.cia.root)
+ return self.cia.ui.popbuffer()
+
+ def xml(self):
+ n = short(self.ctx.node())
+ src = self.sourceelem(self.cia.project, module=self.cia.module,
+ branch=self.ctx.branch())
+ # unix timestamp
+ dt = self.ctx.date()
+ timestamp = dt[0]
+
+ author = saxutils.escape(self.ctx.user())
+ rev = '%d:%s' % (self.ctx.rev(), n)
+ log = saxutils.escape(self.logmsg())
+
+ url = self.url
+ if url and url[-1] == '/':
+ url = url[:-1]
+ url = url and '<url>%s/rev/%s</url>' % (saxutils.escape(url), n) or ''
+
+ msg = """
+<message>
+ <generator>
+ <name>Mercurial (hgcia)</name>
+ <version>%s</version>
+ <url>%s</url>
+ <user>%s</user>
+ </generator>
+ %s
+ <body>
+ <commit>
+ <author>%s</author>
+ <version>%s</version>
+ <log>%s</log>
+ %s
+ <files>%s</files>
+ </commit>
+ </body>
+ <timestamp>%d</timestamp>
+</message>
+""" % \
+ (HGCIA_VERSION, saxutils.escape(HGCIA_URL),
+ saxutils.escape(self.cia.user), src, author, rev, log, url,
+ self.fileelems(), timestamp)
+
+ return msg
+
+
+class hgcia(object):
+ """ CIA notification class """
+
+ deftemplate = '{desc}'
+ dstemplate = '{desc}\n-- \n{diffstat}'
+
+ def __init__(self, ui, repo):
+ self.ui = ui
+ self.repo = repo
+
+ self.ciaurl = self.ui.config('cia', 'url', 'http://cia.vc')
+ self.user = self.ui.config('cia', 'user')
+ self.project = self.ui.config('cia', 'project')
+ self.module = self.ui.config('cia', 'module')
+ self.diffstat = self.ui.configbool('cia', 'diffstat')
+ self.emailfrom = self.ui.config('email', 'from')
+ self.dryrun = self.ui.configbool('cia', 'test')
+ self.url = self.ui.config('web', 'baseurl')
+ # Default to -1 for backward compatibility
+ self.stripcount = int(self.ui.config('cia', 'strip', -1))
+ self.root = self.strip(self.repo.root)
+
+ style = self.ui.config('cia', 'style')
+ template = self.ui.config('cia', 'template')
+ if not template:
+ template = self.diffstat and self.dstemplate or self.deftemplate
+ template = templater.parsestring(template, quoted=False)
+ t = cmdutil.changeset_templater(self.ui, self.repo, False, None,
+ style, False)
+ t.use_template(template)
+ self.templater = t
+
+ def strip(self, path):
+ '''strip leading slashes from local path, turn into web-safe path.'''
+
+ path = util.pconvert(path)
+ count = self.stripcount
+ if count < 0:
+ return ''
+ while count > 0:
+ c = path.find('/')
+ if c == -1:
+ break
+ path = path[c + 1:]
+ count -= 1
+ return path
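+    # for example (illustrative), with "strip = 2" in the [cia] section:
+    #   '/var/repos/myrepo' -> 'repos/myrepo'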
+
+ def sendrpc(self, msg):
+ srv = xmlrpclib.Server(self.ciaurl)
+ res = srv.hub.deliver(msg)
+ if res is not True and res != 'queued.':
+ raise util.Abort(_('%s returned an error: %s') %
+ (self.ciaurl, res))
+
+ def sendemail(self, address, data):
+ p = email.Parser.Parser()
+ msg = p.parsestr(data)
+ msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
+ msg['To'] = address
+ msg['From'] = self.emailfrom
+ msg['Subject'] = 'DeliverXML'
+ msg['Content-type'] = 'text/xml'
+ msgtext = msg.as_string()
+
+ self.ui.status(_('hgcia: sending update to %s\n') % address)
+ mail.sendmail(self.ui, util.email(self.emailfrom),
+ [address], msgtext)
+
+
+def hook(ui, repo, hooktype, node=None, url=None, **kwargs):
+ """ send CIA notification """
+ def sendmsg(cia, ctx):
+ msg = ciamsg(cia, ctx).xml()
+ if cia.dryrun:
+ ui.write(msg)
+ elif cia.ciaurl.startswith('mailto:'):
+ if not cia.emailfrom:
+ raise util.Abort(_('email.from must be defined when '
+ 'sending by email'))
+ cia.sendemail(cia.ciaurl[7:], msg)
+ else:
+ cia.sendrpc(msg)
+
+ n = bin(node)
+ cia = hgcia(ui, repo)
+ if not cia.user:
+ ui.debug('cia: no user specified')
+ return
+ if not cia.project:
+ ui.debug('cia: no project specified')
+ return
+ if hooktype == 'changegroup':
+ start = repo.changelog.rev(n)
+ end = len(repo.changelog)
+ for rev in xrange(start, end):
+ n = repo.changelog.node(rev)
+ ctx = repo.changectx(n)
+ sendmsg(cia, ctx)
+ else:
+ ctx = repo.changectx(n)
+ sendmsg(cia, ctx)
diff --git a/hgext/hgk.py b/hgext/hgk.py
new file mode 100644
index 0000000..304b910
--- /dev/null
+++ b/hgext/hgk.py
@@ -0,0 +1,352 @@
+# Minimal support for git commands on an hg repository
+#
+# Copyright 2005, 2006 Chris Mason <mason@suse.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''browse the repository in a graphical way
+
+The hgk extension allows browsing the history of a repository in a
+graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
+distributed with Mercurial.)
+
+hgk consists of two parts: a Tcl script that does the displaying and
+querying of information, and an extension to Mercurial named hgk.py,
+which provides hooks for hgk to get information. hgk can be found in
+the contrib directory, and the extension is shipped in the hgext
+directory and needs to be enabled.
+
+The :hg:`view` command will launch the hgk Tcl script. For this command
+to work, hgk must be in your search path. Alternately, you can specify
+the path to hgk in your configuration file::
+
+ [hgk]
+ path=/location/of/hgk
+
+hgk can make use of the extdiff extension to visualize revisions.
+Assuming you have already configured the extdiff vdiff command, just add::
+
+ [hgk]
+ vdiff=vdiff
+
+The revision context menu will now display additional entries to fire
+vdiff on hovered and selected revisions.
+'''
+
+import os
+from mercurial import commands, util, patch, revlog, scmutil
+from mercurial.node import nullid, nullrev, short
+from mercurial.i18n import _
+
+testedwith = 'internal'
+
+def difftree(ui, repo, node1=None, node2=None, *files, **opts):
+ """diff trees from two commits"""
+ def __difftree(repo, node1, node2, files=[]):
+ assert node2 is not None
+ mmap = repo[node1].manifest()
+ mmap2 = repo[node2].manifest()
+ m = scmutil.match(repo[node1], files)
+ modified, added, removed = repo.status(node1, node2, m)[:3]
+ empty = short(nullid)
+
+ for f in modified:
+ # TODO get file permissions
+ ui.write(":100664 100664 %s %s M\t%s\t%s\n" %
+ (short(mmap[f]), short(mmap2[f]), f, f))
+ for f in added:
+ ui.write(":000000 100664 %s %s N\t%s\t%s\n" %
+ (empty, short(mmap2[f]), f, f))
+ for f in removed:
+ ui.write(":100664 000000 %s %s D\t%s\t%s\n" %
+ (short(mmap[f]), empty, f, f))
+ ##
+
+ while True:
+ if opts['stdin']:
+ try:
+ line = raw_input().split(' ')
+ node1 = line[0]
+ if len(line) > 1:
+ node2 = line[1]
+ else:
+ node2 = None
+ except EOFError:
+ break
+ node1 = repo.lookup(node1)
+ if node2:
+ node2 = repo.lookup(node2)
+ else:
+ node2 = node1
+ node1 = repo.changelog.parents(node1)[0]
+ if opts['patch']:
+ if opts['pretty']:
+ catcommit(ui, repo, node2, "")
+ m = scmutil.match(repo[node1], files)
+ chunks = patch.diff(repo, node1, node2, match=m,
+ opts=patch.diffopts(ui, {'git': True}))
+ for chunk in chunks:
+ ui.write(chunk)
+ else:
+ __difftree(repo, node1, node2, files=files)
+ if not opts['stdin']:
+ break
+
+def catcommit(ui, repo, n, prefix, ctx=None):
+ nlprefix = '\n' + prefix
+ if ctx is None:
+ ctx = repo[n]
+ # use ctx.node() instead ??
+ ui.write("tree %s\n" % short(ctx.changeset()[0]))
+ for p in ctx.parents():
+ ui.write("parent %s\n" % p)
+
+ date = ctx.date()
+ description = ctx.description().replace("\0", "")
+ lines = description.splitlines()
+ if lines and lines[-1].startswith('committer:'):
+ committer = lines[-1].split(': ')[1].rstrip()
+ else:
+ committer = ctx.user()
+
+ ui.write("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1]))
+ ui.write("committer %s %s %s\n" % (committer, int(date[0]), date[1]))
+ ui.write("revision %d\n" % ctx.rev())
+ ui.write("branch %s\n\n" % ctx.branch())
+
+ if prefix != "":
+ ui.write("%s%s\n" % (prefix,
+ description.replace('\n', nlprefix).strip()))
+ else:
+ ui.write(description + "\n")
+ if prefix:
+ ui.write('\0')
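+# catcommit emits a git-cat-file-style record, e.g. (illustrative):
+#   tree 989b4d060121
+#   parent 081603921c3f
+#   author durin42 1240873478 18000
+#   committer durin42 1240873478 18000
+#   revision 2
+#   branch default
+# followed by the commit description.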
+
+def base(ui, repo, node1, node2):
+ """output common ancestor information"""
+ node1 = repo.lookup(node1)
+ node2 = repo.lookup(node2)
+ n = repo.changelog.ancestor(node1, node2)
+ ui.write(short(n) + "\n")
+
+def catfile(ui, repo, type=None, r=None, **opts):
+ """cat a specific revision"""
+ # in stdin mode, every line except the commit is prefixed with two
+    # spaces. This way our caller can find the commit without magic
+    # strings.
+ #
+ prefix = ""
+ if opts['stdin']:
+ try:
+ (type, r) = raw_input().split(' ')
+ prefix = " "
+ except EOFError:
+ return
+
+ else:
+ if not type or not r:
+ ui.warn(_("cat-file: type or revision not supplied\n"))
+ commands.help_(ui, 'cat-file')
+
+ while r:
+ if type != "commit":
+ ui.warn(_("aborting hg cat-file only understands commits\n"))
+ return 1
+ n = repo.lookup(r)
+ catcommit(ui, repo, n, prefix)
+ if opts['stdin']:
+ try:
+ (type, r) = raw_input().split(' ')
+ except EOFError:
+ break
+ else:
+ break
+
+# git rev-tree is a confusing thing. You can supply a number of
+# commit sha1s on the command line, and it walks the commit history
+# telling you which commits are reachable from the supplied ones via
+# a bitmask based on arg position.
+# You can specify a commit to stop at by starting the sha1 with ^.
+def revtree(ui, args, repo, full="tree", maxnr=0, parents=False):
+ def chlogwalk():
+ count = len(repo)
+ i = count
+ l = [0] * 100
+ chunk = 100
+ while True:
+ if chunk > i:
+ chunk = i
+ i = 0
+ else:
+ i -= chunk
+
+ for x in xrange(chunk):
+ if i + x >= count:
+ l[chunk - x:] = [0] * (chunk - x)
+ break
+ if full is not None:
+ l[x] = repo[i + x]
+ l[x].changeset() # force reading
+ else:
+ l[x] = 1
+ for x in xrange(chunk - 1, -1, -1):
+ if l[x] != 0:
+ yield (i + x, full is not None and l[x] or None)
+ if i == 0:
+ break
+
+ # calculate and return the reachability bitmask for sha
+ def is_reachable(ar, reachable, sha):
+ if len(ar) == 0:
+ return 1
+ mask = 0
+ for i in xrange(len(ar)):
+ if sha in reachable[i]:
+ mask |= 1 << i
+
+ return mask
+
+ reachable = []
+ stop_sha1 = []
+ want_sha1 = []
+ count = 0
+
+ # figure out which commits they are asking for and which ones they
+ # want us to stop on
+ for i, arg in enumerate(args):
+ if arg.startswith('^'):
+ s = repo.lookup(arg[1:])
+ stop_sha1.append(s)
+ want_sha1.append(s)
+ elif arg != 'HEAD':
+ want_sha1.append(repo.lookup(arg))
+
+ # calculate the graph for the supplied commits
+ for i, n in enumerate(want_sha1):
+ reachable.append(set())
+ visit = [n]
+ reachable[i].add(n)
+ while visit:
+ n = visit.pop(0)
+ if n in stop_sha1:
+ continue
+ for p in repo.changelog.parents(n):
+ if p not in reachable[i]:
+ reachable[i].add(p)
+ visit.append(p)
+ if p in stop_sha1:
+ continue
+
+ # walk the repository looking for commits that are in our
+ # reachability graph
+ for i, ctx in chlogwalk():
+ n = repo.changelog.node(i)
+ mask = is_reachable(want_sha1, reachable, n)
+ if mask:
+ parentstr = ""
+ if parents:
+ pp = repo.changelog.parents(n)
+ if pp[0] != nullid:
+ parentstr += " " + short(pp[0])
+ if pp[1] != nullid:
+ parentstr += " " + short(pp[1])
+ if not full:
+ ui.write("%s%s\n" % (short(n), parentstr))
+ elif full == "commit":
+ ui.write("%s%s\n" % (short(n), parentstr))
+ catcommit(ui, repo, n, ' ', ctx)
+ else:
+ (p1, p2) = repo.changelog.parents(n)
+ (h, h1, h2) = map(short, (n, p1, p2))
+ (i1, i2) = map(repo.changelog.rev, (p1, p2))
+
+ date = ctx.date()[0]
+ ui.write("%s %s:%s" % (date, h, mask))
+ mask = is_reachable(want_sha1, reachable, p1)
+ if i1 != nullrev and mask > 0:
+ ui.write("%s:%s " % (h1, mask)),
+ mask = is_reachable(want_sha1, reachable, p2)
+ if i2 != nullrev and mask > 0:
+ ui.write("%s:%s " % (h2, mask))
+ ui.write("\n")
+ if maxnr and count >= maxnr:
+ break
+ count += 1
+
+def revparse(ui, repo, *revs, **opts):
+ """parse given revisions"""
+ def revstr(rev):
+ if rev == 'HEAD':
+ rev = 'tip'
+ return revlog.hex(repo.lookup(rev))
+
+ for r in revs:
+ revrange = r.split(':', 1)
+ ui.write('%s\n' % revstr(revrange[0]))
+ if len(revrange) == 2:
+ ui.write('^%s\n' % revstr(revrange[1]))
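+# e.g. (illustrative) 'hg debug-rev-parse 0:tip' prints the full hex of
+# rev 0 followed by '^' and the full hex of tip.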
+
+# git rev-list tries to order things by date, and has the ability to stop
+# at a given commit without walking the whole repo. TODO add the stop
+# parameter
+def revlist(ui, repo, *revs, **opts):
+ """print revisions"""
+ if opts['header']:
+ full = "commit"
+ else:
+ full = None
+ copy = [x for x in revs]
+ revtree(ui, copy, repo, full, opts['max_count'], opts['parents'])
+
+def config(ui, repo, **opts):
+ """print extension options"""
+ def writeopt(name, value):
+ ui.write('k=%s\nv=%s\n' % (name, value))
+
+ writeopt('vdiff', ui.config('hgk', 'vdiff', ''))
+
+
+def view(ui, repo, *etc, **opts):
+ "start interactive history viewer"
+ os.chdir(repo.root)
+ optstr = ' '.join(['--%s %s' % (k, v) for k, v in opts.iteritems() if v])
+ cmd = ui.config("hgk", "path", "hgk") + " %s %s" % (optstr, " ".join(etc))
+ ui.debug("running %s\n" % cmd)
+ util.system(cmd)
+
+cmdtable = {
+ "^view":
+ (view,
+ [('l', 'limit', '',
+ _('limit number of changes displayed'), _('NUM'))],
+ _('hg view [-l LIMIT] [REVRANGE]')),
+ "debug-diff-tree":
+ (difftree,
+ [('p', 'patch', None, _('generate patch')),
+ ('r', 'recursive', None, _('recursive')),
+ ('P', 'pretty', None, _('pretty')),
+ ('s', 'stdin', None, _('stdin')),
+ ('C', 'copy', None, _('detect copies')),
+ ('S', 'search', "", _('search'))],
+ _('hg git-diff-tree [OPTION]... NODE1 NODE2 [FILE]...')),
+ "debug-cat-file":
+ (catfile,
+ [('s', 'stdin', None, _('stdin'))],
+ _('hg debug-cat-file [OPTION]... TYPE FILE')),
+ "debug-config":
+ (config, [], _('hg debug-config')),
+ "debug-merge-base":
+ (base, [], _('hg debug-merge-base REV REV')),
+ "debug-rev-parse":
+ (revparse,
+ [('', 'default', '', _('ignored'))],
+ _('hg debug-rev-parse REV')),
+ "debug-rev-list":
+ (revlist,
+ [('H', 'header', None, _('header')),
+ ('t', 'topo-order', None, _('topo-order')),
+ ('p', 'parents', None, _('parents')),
+ ('n', 'max-count', 0, _('max-count'))],
+ _('hg debug-rev-list [OPTION]... REV...')),
+}
diff --git a/hgext/highlight/__init__.py b/hgext/highlight/__init__.py
new file mode 100644
index 0000000..fc47815
--- /dev/null
+++ b/hgext/highlight/__init__.py
@@ -0,0 +1,64 @@
+# highlight - syntax highlighting in hgweb, based on Pygments
+#
+# Copyright 2008, 2009 Patrick Mezard <pmezard@gmail.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+#
+# The original module was split in an interface and an implementation
+# file to defer pygments loading and speedup extension setup.
+
+"""syntax highlighting for hgweb (requires Pygments)
+
+It depends on the Pygments syntax highlighting library:
+http://pygments.org/
+
+There is a single configuration option::
+
+ [web]
+ pygments_style = <style>
+
+The default is 'colorful'.
+"""
+
+import highlight
+from mercurial.hgweb import webcommands, webutil, common
+from mercurial import extensions, encoding
+testedwith = 'internal'
+
+def filerevision_highlight(orig, web, tmpl, fctx):
+ mt = ''.join(tmpl('mimetype', encoding=encoding.encoding))
+ # only pygmentize for mimetype containing 'html' so we both match
+ # 'text/html' and possibly 'application/xhtml+xml' in the future
+ # so that we don't have to touch the extension when the mimetype
+ # for a template changes; also hgweb optimizes the case that a
+ # raw file is sent using rawfile() and doesn't call us, so we
+ # can't clash with the file's content-type here in case we
+ # pygmentize a html file
+ if 'html' in mt:
+ style = web.config('web', 'pygments_style', 'colorful')
+ highlight.pygmentize('fileline', fctx, style, tmpl)
+ return orig(web, tmpl, fctx)
+
+def annotate_highlight(orig, web, req, tmpl):
+ mt = ''.join(tmpl('mimetype', encoding=encoding.encoding))
+ if 'html' in mt:
+ fctx = webutil.filectx(web.repo, req)
+ style = web.config('web', 'pygments_style', 'colorful')
+ highlight.pygmentize('annotateline', fctx, style, tmpl)
+ return orig(web, req, tmpl)
+
+def generate_css(web, req, tmpl):
+ pg_style = web.config('web', 'pygments_style', 'colorful')
+ fmter = highlight.HtmlFormatter(style = pg_style)
+ req.respond(common.HTTP_OK, 'text/css')
+ return ['/* pygments_style = %s */\n\n' % pg_style,
+ fmter.get_style_defs('')]
+
+def extsetup():
+ # monkeypatch in the new version
+ extensions.wrapfunction(webcommands, '_filerevision',
+ filerevision_highlight)
+ extensions.wrapfunction(webcommands, 'annotate', annotate_highlight)
+ webcommands.highlightcss = generate_css
+ webcommands.__all__.append('highlightcss')
diff --git a/hgext/highlight/highlight.py b/hgext/highlight/highlight.py
new file mode 100644
index 0000000..a8265cf
--- /dev/null
+++ b/hgext/highlight/highlight.py
@@ -0,0 +1,61 @@
+# highlight.py - highlight extension implementation file
+#
+# Copyright 2007-2009 Adam Hupp <adam@hupp.org> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+#
+# The original module was split in an interface and an implementation
+# file to defer pygments loading and speedup extension setup.
+
+from mercurial import demandimport
+demandimport.ignore.extend(['pkgutil', 'pkg_resources', '__main__'])
+from mercurial import util, encoding
+
+from pygments import highlight
+from pygments.util import ClassNotFound
+from pygments.lexers import guess_lexer, guess_lexer_for_filename, TextLexer
+from pygments.formatters import HtmlFormatter
+
+SYNTAX_CSS = ('\n<link rel="stylesheet" href="{url}highlightcss" '
+ 'type="text/css" />')
+
+def pygmentize(field, fctx, style, tmpl):
+
+ # append a <link ...> to the syntax highlighting css
+ old_header = tmpl.load('header')
+ if SYNTAX_CSS not in old_header:
+ new_header = old_header + SYNTAX_CSS
+ tmpl.cache['header'] = new_header
+
+ text = fctx.data()
+ if util.binary(text):
+ return
+
+ # Pygments is best used with Unicode strings:
+ # <http://pygments.org/docs/unicode/>
+ text = text.decode(encoding.encoding, 'replace')
+
+ # To get multi-line strings right, we can't format line-by-line
+ try:
+ lexer = guess_lexer_for_filename(fctx.path(), text[:1024])
+ except (ClassNotFound, ValueError):
+ try:
+ lexer = guess_lexer(text[:1024])
+ except (ClassNotFound, ValueError):
+ lexer = TextLexer()
+
+ formatter = HtmlFormatter(style=style)
+
+ colorized = highlight(text, lexer, formatter)
+ # strip wrapping div
+ colorized = colorized[:colorized.find('\n</pre>')]
+ colorized = colorized[colorized.find('<pre>')+5:]
+ coloriter = (s.encode(encoding.encoding, 'replace')
+ for s in colorized.splitlines())
+
+ tmpl.filters['colorize'] = lambda x: coloriter.next()
+
+ oldl = tmpl.cache[field]
+ newl = oldl.replace('line|escape', 'line|colorize')
+ tmpl.cache[field] = newl
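+# after the rewrite above, a cached fileline entry that contained
+# '{line|escape}' reads '{line|colorize}', so each output line is drawn
+# from the pre-highlighted coloriter instead of being escaped raw.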
diff --git a/hgext/histedit.py b/hgext/histedit.py
new file mode 100644
index 0000000..88e0e93
--- /dev/null
+++ b/hgext/histedit.py
@@ -0,0 +1,715 @@
+# histedit.py - interactive history editing for mercurial
+#
+# Copyright 2009 Augie Fackler <raf@durin42.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""interactive history editing
+
+With this extension installed, Mercurial gains one new command: histedit. Usage
+is as follows, assuming the following history::
+
+ @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
+ | Add delta
+ |
+ o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
+ | Add gamma
+ |
+ o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
+ | Add beta
+ |
+ o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
+ Add alpha
+
+If you were to run ``hg histedit c561b4e977df``, you would see the following
+file open in your editor::
+
+ pick c561b4e977df Add beta
+ pick 030b686bedc4 Add gamma
+ pick 7c2fd3b9020c Add delta
+
+ # Edit history between 633536316234 and 7c2fd3b9020c
+ #
+ # Commands:
+ # p, pick = use commit
+ # e, edit = use commit, but stop for amending
+ # f, fold = use commit, but fold into previous commit
+ # d, drop = remove commit from history
+ # m, mess = edit message without changing commit content
+ #
+
+In this file, lines beginning with ``#`` are ignored. You must specify a rule
+for each revision in your history. For example, if you had meant to add gamma
+before beta, and then wanted to add delta in the same revision as beta, you
+would reorganize the file to look like this::
+
+ pick 030b686bedc4 Add gamma
+ pick c561b4e977df Add beta
+ fold 7c2fd3b9020c Add delta
+
+ # Edit history between 633536316234 and 7c2fd3b9020c
+ #
+ # Commands:
+ # p, pick = use commit
+ # e, edit = use commit, but stop for amending
+ # f, fold = use commit, but fold into previous commit
+ # d, drop = remove commit from history
+ # m, mess = edit message without changing commit content
+ #
+
+At which point you close the editor and ``histedit`` starts working. When you
+specify a ``fold`` operation, ``histedit`` will open an editor when it folds
+those revisions together, offering you a chance to clean up the commit message::
+
+ Add beta
+ ***
+ Add delta
+
+Edit the commit message to your liking, then close the editor. For
+this example, let's assume that the commit message was changed to
+``Add beta and delta.`` After histedit has run and had a chance to
+remove any old or temporary revisions it needed, the history looks
+like this::
+
+ @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
+ | Add beta and delta.
+ |
+ o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
+ | Add gamma
+ |
+ o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
+ Add alpha
+
+Note that ``histedit`` does *not* remove any revisions (even its own temporary
+ones) until after it has completed all the editing operations, so it will
+probably perform several strip operations when it's done. For the above example,
+it had to run strip twice. Strip can be slow depending on a variety of factors,
+so you might need to be a little patient. You can choose to keep the original
+revisions by passing the ``--keep`` flag.
+
+The ``edit`` operation will drop you back to a command prompt,
+allowing you to edit files freely, or even use ``hg record`` to commit
+some changes as a separate commit. Any remaining uncommitted changes
+will be committed as well when you are done. Then run
+``hg histedit --continue`` to finish this step. You'll be prompted
+for a new commit message, but the default commit message will be the
+original message of the revision being edited.
+
+The ``message`` operation will give you a chance to revise a commit
+message without changing the contents. It's a shortcut for doing
+``edit`` immediately followed by ``hg histedit --continue``.
+
+If ``histedit`` encounters a conflict when moving a revision (while
+handling ``pick`` or ``fold``), it'll stop in a similar manner to
+``edit`` with the difference that it won't prompt you for a commit
+message when done. If you decide at this point that you don't like how
+much work it will be to rearrange history, or that you made a mistake,
+you can use ``hg histedit --abort`` to abandon the new changes you
+have made and return to the state before you attempted to edit your
+history.
+
+If we clone the example repository above and add three more changes, such that
+we have the following history::
+
+ @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
+ | Add theta
+ |
+ o 5 140988835471 2009-04-27 18:04 -0500 stefan
+ | Add eta
+ |
+ o 4 122930637314 2009-04-27 18:04 -0500 stefan
+ | Add zeta
+ |
+ o 3 836302820282 2009-04-27 18:04 -0500 stefan
+ | Add epsilon
+ |
+ o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
+ | Add beta and delta.
+ |
+ o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
+ | Add gamma
+ |
+ o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
+ Add alpha
+
+If you run ``hg histedit --outgoing`` on the clone, it is the same as
+running ``hg histedit 836302820282``. If you plan to push to a
+repository that Mercurial does not detect to be related to the source
+repo, you can add a ``--force`` option.
+"""
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+import tempfile
+import os
+
+from mercurial import bookmarks
+from mercurial import cmdutil
+from mercurial import discovery
+from mercurial import error
+from mercurial import hg
+from mercurial import lock as lockmod
+from mercurial import node
+from mercurial import patch
+from mercurial import repair
+from mercurial import scmutil
+from mercurial import util
+from mercurial.i18n import _
+
+cmdtable = {}
+command = cmdutil.command(cmdtable)
+
+testedwith = 'internal'
+
+editcomment = _("""# Edit history between %s and %s
+#
+# Commands:
+# p, pick = use commit
+# e, edit = use commit, but stop for amending
+# f, fold = use commit, but fold into previous commit (combines N and N-1)
+# d, drop = remove commit from history
+# m, mess = edit message without changing commit content
+#
+""")
+
+def between(repo, old, new, keep):
+ revs = [old]
+ current = old
+ while current != new:
+ ctx = repo[current]
+ if not keep and len(ctx.children()) > 1:
+ raise util.Abort(_('cannot edit history that would orphan nodes'))
+ if len(ctx.parents()) != 1 and ctx.parents()[1] != node.nullid:
+ raise util.Abort(_("can't edit history with merges"))
+ if not ctx.children():
+ current = new
+ else:
+ current = ctx.children()[0].node()
+ revs.append(current)
+ if len(repo[current].children()) and not keep:
+ raise util.Abort(_('cannot edit history that would orphan nodes'))
+ return revs
+
+
+def pick(ui, repo, ctx, ha, opts):
+ oldctx = repo[ha]
+ if oldctx.parents()[0] == ctx:
+ ui.debug('node %s unchanged\n' % ha)
+ return oldctx, [], [], []
+ hg.update(repo, ctx.node())
+ fd, patchfile = tempfile.mkstemp(prefix='hg-histedit-')
+ fp = os.fdopen(fd, 'w')
+ diffopts = patch.diffopts(ui, opts)
+ diffopts.git = True
+ diffopts.ignorews = False
+ diffopts.ignorewsamount = False
+ diffopts.ignoreblanklines = False
+ gen = patch.diff(repo, oldctx.parents()[0].node(), ha, opts=diffopts)
+ for chunk in gen:
+ fp.write(chunk)
+ fp.close()
+ try:
+ files = set()
+ try:
+ patch.patch(ui, repo, patchfile, files=files, eolmode=None)
+ if not files:
+                ui.warn(_('%s: empty changeset\n') % node.hex(ha))
+ return ctx, [], [], []
+ finally:
+ os.unlink(patchfile)
+ except Exception:
+ raise util.Abort(_('Fix up the change and run '
+ 'hg histedit --continue'))
+ n = repo.commit(text=oldctx.description(), user=oldctx.user(),
+ date=oldctx.date(), extra=oldctx.extra())
+ return repo[n], [n], [oldctx.node()], []
+
+
+def edit(ui, repo, ctx, ha, opts):
+ oldctx = repo[ha]
+ hg.update(repo, ctx.node())
+ fd, patchfile = tempfile.mkstemp(prefix='hg-histedit-')
+ fp = os.fdopen(fd, 'w')
+ diffopts = patch.diffopts(ui, opts)
+ diffopts.git = True
+ diffopts.ignorews = False
+ diffopts.ignorewsamount = False
+ diffopts.ignoreblanklines = False
+ gen = patch.diff(repo, oldctx.parents()[0].node(), ha, opts=diffopts)
+ for chunk in gen:
+ fp.write(chunk)
+ fp.close()
+ try:
+ files = set()
+ try:
+ patch.patch(ui, repo, patchfile, files=files, eolmode=None)
+ finally:
+ os.unlink(patchfile)
+ except Exception:
+ pass
+    raise util.Abort(_('Make changes as needed; you may commit or record as'
+                       ' needed now.\nWhen you are finished, run hg'
+                       ' histedit --continue to resume.'))
+
+def fold(ui, repo, ctx, ha, opts):
+ oldctx = repo[ha]
+ hg.update(repo, ctx.node())
+ fd, patchfile = tempfile.mkstemp(prefix='hg-histedit-')
+ fp = os.fdopen(fd, 'w')
+ diffopts = patch.diffopts(ui, opts)
+ diffopts.git = True
+ diffopts.ignorews = False
+ diffopts.ignorewsamount = False
+ diffopts.ignoreblanklines = False
+ gen = patch.diff(repo, oldctx.parents()[0].node(), ha, opts=diffopts)
+ for chunk in gen:
+ fp.write(chunk)
+ fp.close()
+ try:
+ files = set()
+ try:
+ patch.patch(ui, repo, patchfile, files=files, eolmode=None)
+ if not files:
+                ui.warn(_('%s: empty changeset\n') % node.hex(ha))
+ return ctx, [], [], []
+ finally:
+ os.unlink(patchfile)
+ except Exception:
+ raise util.Abort(_('Fix up the change and run '
+ 'hg histedit --continue'))
+ n = repo.commit(text='fold-temp-revision %s' % ha, user=oldctx.user(),
+ date=oldctx.date(), extra=oldctx.extra())
+ return finishfold(ui, repo, ctx, oldctx, n, opts, [])
+
+def finishfold(ui, repo, ctx, oldctx, newnode, opts, internalchanges):
+ parent = ctx.parents()[0].node()
+ hg.update(repo, parent)
+ fd, patchfile = tempfile.mkstemp(prefix='hg-histedit-')
+ fp = os.fdopen(fd, 'w')
+ diffopts = patch.diffopts(ui, opts)
+ diffopts.git = True
+ diffopts.ignorews = False
+ diffopts.ignorewsamount = False
+ diffopts.ignoreblanklines = False
+ gen = patch.diff(repo, parent, newnode, opts=diffopts)
+ for chunk in gen:
+ fp.write(chunk)
+ fp.close()
+ files = set()
+ try:
+ patch.patch(ui, repo, patchfile, files=files, eolmode=None)
+ finally:
+ os.unlink(patchfile)
+ newmessage = '\n***\n'.join(
+ [ctx.description()] +
+ [repo[r].description() for r in internalchanges] +
+ [oldctx.description()]) + '\n'
+    # If the changesets are from the same author, keep the original author.
+ if ctx.user() == oldctx.user():
+ username = ctx.user()
+ else:
+ username = ui.username()
+ newmessage = ui.edit(newmessage, username)
+ n = repo.commit(text=newmessage, user=username,
+ date=max(ctx.date(), oldctx.date()), extra=oldctx.extra())
+ return repo[n], [n], [oldctx.node(), ctx.node()], [newnode]
+
+def drop(ui, repo, ctx, ha, opts):
+ return ctx, [], [repo[ha].node()], []
+
+
+def message(ui, repo, ctx, ha, opts):
+ oldctx = repo[ha]
+ hg.update(repo, ctx.node())
+ fd, patchfile = tempfile.mkstemp(prefix='hg-histedit-')
+ fp = os.fdopen(fd, 'w')
+ diffopts = patch.diffopts(ui, opts)
+ diffopts.git = True
+ diffopts.ignorews = False
+ diffopts.ignorewsamount = False
+ diffopts.ignoreblanklines = False
+ gen = patch.diff(repo, oldctx.parents()[0].node(), ha, opts=diffopts)
+ for chunk in gen:
+ fp.write(chunk)
+ fp.close()
+ try:
+ files = set()
+ try:
+ patch.patch(ui, repo, patchfile, files=files, eolmode=None)
+ finally:
+ os.unlink(patchfile)
+ except Exception:
+ raise util.Abort(_('Fix up the change and run '
+ 'hg histedit --continue'))
+ message = oldctx.description() + '\n'
+ message = ui.edit(message, ui.username())
+ new = repo.commit(text=message, user=oldctx.user(), date=oldctx.date(),
+ extra=oldctx.extra())
+ newctx = repo[new]
+ if oldctx.node() != newctx.node():
+ return newctx, [new], [oldctx.node()], []
+ # We didn't make an edit, so just indicate no replaced nodes
+ return newctx, [new], [], []
+
+
+def makedesc(c):
+ summary = ''
+ if c.description():
+ summary = c.description().splitlines()[0]
+ line = 'pick %s %d %s' % (c.hex()[:12], c.rev(), summary)
+    return line[:80] # trim to 80 chars to keep the rules file readable
+
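+# makedesc produces one editor line per changeset; with the history from
+# the module docstring it would yield, e.g.:
+#   pick c561b4e977df 1 Add beta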
+actiontable = {'p': pick,
+ 'pick': pick,
+ 'e': edit,
+ 'edit': edit,
+ 'f': fold,
+ 'fold': fold,
+ 'd': drop,
+ 'drop': drop,
+ 'm': message,
+ 'mess': message,
+ }
+
+@command('histedit',
+ [('', 'commands', '',
+      _('read history edits from the specified file')),
+ ('c', 'continue', False, _('continue an edit already in progress')),
+ ('k', 'keep', False,
+ _("don't strip old nodes after edit is complete")),
+ ('', 'abort', False, _('abort an edit in progress')),
+ ('o', 'outgoing', False, _('changesets not found in destination')),
+ ('f', 'force', False,
+ _('force outgoing even for unrelated repositories')),
+ ('r', 'rev', [], _('first revision to be edited'))],
+ _("[PARENT]"))
+def histedit(ui, repo, *parent, **opts):
+ """interactively edit changeset history
+ """
+ # TODO only abort if we try and histedit mq patches, not just
+ # blanket if mq patches are applied somewhere
+ mq = getattr(repo, 'mq', None)
+ if mq and mq.applied:
+ raise util.Abort(_('source has mq patches applied'))
+
+ parent = list(parent) + opts.get('rev', [])
+ if opts.get('outgoing'):
+ if len(parent) > 1:
+ raise util.Abort(
+ _('only one repo argument allowed with --outgoing'))
+ elif parent:
+ parent = parent[0]
+
+ dest = ui.expandpath(parent or 'default-push', parent or 'default')
+ dest, revs = hg.parseurl(dest, None)[:2]
+ ui.status(_('comparing with %s\n') % util.hidepassword(dest))
+
+ revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
+ other = hg.peer(repo, opts, dest)
+
+ if revs:
+ revs = [repo.lookup(rev) for rev in revs]
+
+ parent = discovery.findcommonoutgoing(
+ repo, other, [], force=opts.get('force')).missing[0:1]
+ else:
+ if opts.get('force'):
+ raise util.Abort(_('--force only allowed with --outgoing'))
+
+ if opts.get('continue', False):
+ if len(parent) != 0:
+ raise util.Abort(_('no arguments allowed with --continue'))
+ (parentctxnode, created, replaced,
+ tmpnodes, existing, rules, keep, tip, replacemap) = readstate(repo)
+ currentparent, wantnull = repo.dirstate.parents()
+ parentctx = repo[parentctxnode]
+ # existing is the list of revisions initially considered by
+ # histedit. Here we use it to list new changesets, descendants
+ # of parentctx without an 'existing' changeset in-between. We
+ # also have to exclude 'existing' changesets which were
+ # previously dropped.
+ descendants = set(c.node() for c in
+ repo.set('(%n::) - %n', parentctxnode, parentctxnode))
+ existing = set(existing)
+ notdropped = set(n for n in existing if n in descendants and
+ (n not in replacemap or replacemap[n] in descendants))
+        # Discover any nodes the user has added in the interim. We may
+        # miss changesets that were dropped and recreated identically.
+ newchildren = list(c.node() for c in repo.set(
+ 'sort(%ln - (%ln or %ln::))', descendants, existing, notdropped))
+ action, currentnode = rules.pop(0)
+ if action in ('f', 'fold'):
+ tmpnodes.extend(newchildren)
+ else:
+ created.extend(newchildren)
+
+ m, a, r, d = repo.status()[:4]
+ oldctx = repo[currentnode]
+ message = oldctx.description() + '\n'
+ if action in ('e', 'edit', 'm', 'mess'):
+ message = ui.edit(message, ui.username())
+ elif action in ('f', 'fold'):
+ message = 'fold-temp-revision %s' % currentnode
+ new = None
+ if m or a or r or d:
+ new = repo.commit(text=message, user=oldctx.user(),
+ date=oldctx.date(), extra=oldctx.extra())
+
+ # If we're resuming a fold and we have new changes, mark the
+ # replacements and finish the fold. If not, it's more like a
+ # drop of the changesets that disappeared, and we can skip
+ # this step.
+ if action in ('f', 'fold') and (new or newchildren):
+ if new:
+ tmpnodes.append(new)
+ else:
+ new = newchildren[-1]
+ (parentctx, created_, replaced_, tmpnodes_) = finishfold(
+ ui, repo, parentctx, oldctx, new, opts, newchildren)
+ replaced.extend(replaced_)
+ created.extend(created_)
+ tmpnodes.extend(tmpnodes_)
+ elif action not in ('d', 'drop'):
+ if new != oldctx.node():
+ replaced.append(oldctx.node())
+ if new:
+ if new != oldctx.node():
+ created.append(new)
+ parentctx = repo[new]
+
+ elif opts.get('abort', False):
+ if len(parent) != 0:
+ raise util.Abort(_('no arguments allowed with --abort'))
+ (parentctxnode, created, replaced, tmpnodes,
+ existing, rules, keep, tip, replacemap) = readstate(repo)
+ ui.debug('restore wc to old tip %s\n' % node.hex(tip))
+ hg.clean(repo, tip)
+ ui.debug('should strip created nodes %s\n' %
+ ', '.join([node.hex(n)[:12] for n in created]))
+ ui.debug('should strip temp nodes %s\n' %
+ ', '.join([node.hex(n)[:12] for n in tmpnodes]))
+ for nodes in (created, tmpnodes):
+ lock = None
+ try:
+ lock = repo.lock()
+ for n in reversed(nodes):
+ try:
+ repair.strip(ui, repo, n)
+ except error.LookupError:
+ pass
+ finally:
+ lockmod.release(lock)
+ os.unlink(os.path.join(repo.path, 'histedit-state'))
+ return
+ else:
+ cmdutil.bailifchanged(repo)
+ if os.path.exists(os.path.join(repo.path, 'histedit-state')):
+ raise util.Abort(_('history edit already in progress, try '
+ '--continue or --abort'))
+
+ tip, empty = repo.dirstate.parents()
+
+
+ if len(parent) != 1:
+ raise util.Abort(_('histedit requires exactly one parent revision'))
+ parent = scmutil.revsingle(repo, parent[0]).node()
+
+ keep = opts.get('keep', False)
+ revs = between(repo, parent, tip, keep)
+
+ ctxs = [repo[r] for r in revs]
+ existing = [r.node() for r in ctxs]
+ rules = opts.get('commands', '')
+ if not rules:
+ rules = '\n'.join([makedesc(c) for c in ctxs])
+ rules += '\n\n'
+ rules += editcomment % (node.hex(parent)[:12], node.hex(tip)[:12])
+ rules = ui.edit(rules, ui.username())
+ # Save edit rules in .hg/histedit-last-edit.txt in case
+ # the user needs to ask for help after something
+ # surprising happens.
+ f = open(repo.join('histedit-last-edit.txt'), 'w')
+ f.write(rules)
+ f.close()
+ else:
+ f = open(rules)
+ rules = f.read()
+ f.close()
+ rules = [l for l in (r.strip() for r in rules.splitlines())
+ if l and not l[0] == '#']
+ rules = verifyrules(rules, repo, ctxs)
+
+ parentctx = repo[parent].parents()[0]
+ keep = opts.get('keep', False)
+ replaced = []
+ replacemap = {}
+ tmpnodes = []
+ created = []
+
+
+ while rules:
+ writestate(repo, parentctx.node(), created, replaced,
+ tmpnodes, existing, rules, keep, tip, replacemap)
+ action, ha = rules.pop(0)
+ (parentctx, created_, replaced_, tmpnodes_) = actiontable[action](
+ ui, repo, parentctx, ha, opts)
+
+ if replaced_:
+ clen, rlen = len(created_), len(replaced_)
+ if clen == rlen == 1:
+ ui.debug('histedit: exact replacement of %s with %s\n' % (
+ node.short(replaced_[0]), node.short(created_[0])))
+
+ replacemap[replaced_[0]] = created_[0]
+ elif clen > rlen:
+ assert rlen == 1, ('unexpected replacement of '
+ '%d changes with %d changes' % (rlen, clen))
+ # made more changesets than we're replacing
+ # TODO synthesize patch names for created patches
+ replacemap[replaced_[0]] = created_[-1]
+ ui.debug('histedit: created many, assuming %s replaced by %s' %
+ (node.short(replaced_[0]), node.short(created_[-1])))
+ elif rlen > clen:
+ if not created_:
+                    # This must be a drop. Try to put our metadata on
+ # the parent change.
+ assert rlen == 1
+ r = replaced_[0]
+ ui.debug('histedit: %s seems replaced with nothing, '
+ 'finding a parent\n' % (node.short(r)))
+ pctx = repo[r].parents()[0]
+ if pctx.node() in replacemap:
+ ui.debug('histedit: parent is already replaced\n')
+ replacemap[r] = replacemap[pctx.node()]
+ else:
+ replacemap[r] = pctx.node()
+ ui.debug('histedit: %s best replaced by %s\n' % (
+ node.short(r), node.short(replacemap[r])))
+ else:
+ assert len(created_) == 1
+ for r in replaced_:
+ ui.debug('histedit: %s replaced by %s\n' % (
+ node.short(r), node.short(created_[0])))
+ replacemap[r] = created_[0]
+ else:
+ assert False, (
+ 'Unhandled case in replacement mapping! '
+ 'replacing %d changes with %d changes' % (rlen, clen))
+ created.extend(created_)
+ replaced.extend(replaced_)
+ tmpnodes.extend(tmpnodes_)
+
+ hg.update(repo, parentctx.node())
+
+ if not keep:
+ if replacemap:
+ ui.note(_('histedit: Should update metadata for the following '
+ 'changes:\n'))
+
+ def copybms(old, new):
+ if old in tmpnodes or old in created:
+ # can't have any metadata we'd want to update
+ return
+ while new in replacemap:
+ new = replacemap[new]
+ ui.note(_('histedit: %s to %s\n') % (node.short(old),
+ node.short(new)))
+ octx = repo[old]
+ marks = octx.bookmarks()
+ if marks:
+ ui.note(_('histedit: moving bookmarks %s\n') %
+ ', '.join(marks))
+ for mark in marks:
+ repo._bookmarks[mark] = new
+ bookmarks.write(repo)
+
+ # We assume that bookmarks on the tip should remain
+ # tipmost, but bookmarks on non-tip changesets should go
+ # to their most reasonable successor. As a result, find
+ # the old tip and new tip and copy those bookmarks first,
+ # then do the rest of the bookmark copies.
+ oldtip = sorted(replacemap.keys(), key=repo.changelog.rev)[-1]
+ newtip = sorted(replacemap.values(), key=repo.changelog.rev)[-1]
+ copybms(oldtip, newtip)
+
+ for old, new in sorted(replacemap.iteritems()):
+ copybms(old, new)
+ # TODO update mq state
+
+ ui.debug('should strip replaced nodes %s\n' %
+ ', '.join([node.hex(n)[:12] for n in replaced]))
+ lock = None
+ try:
+ lock = repo.lock()
+ for n in sorted(replaced, key=lambda x: repo[x].rev()):
+ try:
+ repair.strip(ui, repo, n)
+ except error.LookupError:
+ pass
+ finally:
+ lockmod.release(lock)
+
+ ui.debug('should strip temp nodes %s\n' %
+ ', '.join([node.hex(n)[:12] for n in tmpnodes]))
+ lock = None
+ try:
+ lock = repo.lock()
+ for n in reversed(tmpnodes):
+ try:
+ repair.strip(ui, repo, n)
+ except error.LookupError:
+ pass
+ finally:
+ lockmod.release(lock)
+ os.unlink(os.path.join(repo.path, 'histedit-state'))
+ if os.path.exists(repo.sjoin('undo')):
+ os.unlink(repo.sjoin('undo'))
+
+
+def writestate(repo, parentctxnode, created, replaced,
+ tmpnodes, existing, rules, keep, oldtip, replacemap):
+ fp = open(os.path.join(repo.path, 'histedit-state'), 'w')
+ pickle.dump((parentctxnode, created, replaced,
+ tmpnodes, existing, rules, keep, oldtip, replacemap),
+ fp)
+ fp.close()
+
+def readstate(repo):
+ """Returns a tuple of (parentnode, created, replaced, tmp, existing, rules,
+ keep, oldtip, replacemap ).
+ """
+ fp = open(os.path.join(repo.path, 'histedit-state'))
+ return pickle.load(fp)
+
+
+def verifyrules(rules, repo, ctxs):
+ """Verify that there exists exactly one edit rule per given changeset.
+
+    Will abort if there are too many or too few rules, a malformed rule,
+ or a rule on a changeset outside of the user-given range.
+ """
+ parsed = []
+ if len(rules) != len(ctxs):
+        raise util.Abort(_('must specify exactly one rule for each changeset'))
+ for r in rules:
+ if ' ' not in r:
+ raise util.Abort(_('malformed line "%s"') % r)
+ action, rest = r.split(' ', 1)
+ if ' ' in rest.strip():
+ ha, rest = rest.split(' ', 1)
+ else:
+            ha = rest.strip()
+ try:
+ if repo[ha] not in ctxs:
+ raise util.Abort(
+ _('may not use changesets other than the ones listed'))
+ except error.RepoError:
+ raise util.Abort(_('unknown changeset %s listed') % ha)
+ if action not in actiontable:
+ raise util.Abort(_('unknown action "%s"') % action)
+ parsed.append([action, ha])
+ return parsed
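+# a well-formed rule such as 'fold 7c2fd3b9020c Add delta' parses to
+# ['fold', '7c2fd3b9020c']; anything after the changeset hash is treated
+# as a human-readable summary and ignored.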
diff --git a/hgext/inotify/__init__.py b/hgext/inotify/__init__.py
new file mode 100644
index 0000000..09c8bef
--- /dev/null
+++ b/hgext/inotify/__init__.py
@@ -0,0 +1,93 @@
+# __init__.py - inotify-based status acceleration for Linux
+#
+# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
+# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''accelerate status report using Linux's inotify service'''
+
+# todo: socket permissions
+
+from mercurial.i18n import _
+from mercurial import util
+import server
+from client import client, QueryFailed
+
+testedwith = 'internal'
+
+def serve(ui, repo, **opts):
+ '''start an inotify server for this repository'''
+ server.start(ui, repo.dirstate, repo.root, opts)
+
+def debuginotify(ui, repo, **opts):
+ '''debugging information for inotify extension
+
+ Prints the list of directories being watched by the inotify server.
+ '''
+ cli = client(ui, repo)
+ response = cli.debugquery()
+
+ ui.write(_('directories being watched:\n'))
+ for path in response:
+ ui.write((' %s/\n') % path)
+
+def reposetup(ui, repo):
+ if not util.safehasattr(repo, 'dirstate'):
+ return
+
+ class inotifydirstate(repo.dirstate.__class__):
+
+ # We'll set this to false after an unsuccessful attempt so that
+ # next calls of status() within the same instance don't try again
+ # to start an inotify server if it won't start.
+ _inotifyon = True
+
+ def status(self, match, subrepos, ignored, clean, unknown):
+ files = match.files()
+ if '.' in files:
+ files = []
+ if (self._inotifyon and not ignored and not subrepos and
+ not self._dirty):
+ cli = client(ui, repo)
+ try:
+ result = cli.statusquery(files, match, False,
+ clean, unknown)
+ except QueryFailed, instr:
+ ui.debug(str(instr))
+ # don't retry within the same hg instance
+ inotifydirstate._inotifyon = False
+ else:
+ if ui.config('inotify', 'debug'):
+ r2 = super(inotifydirstate, self).status(
+ match, [], False, clean, unknown)
+ for c, a, b in zip('LMARDUIC', result, r2):
+ for f in a:
+ if f not in b:
+ ui.warn('*** inotify: %s +%s\n' % (c, f))
+ for f in b:
+ if f not in a:
+ ui.warn('*** inotify: %s -%s\n' % (c, f))
+ result = r2
+ return result
+ return super(inotifydirstate, self).status(
+ match, subrepos, ignored, clean, unknown)
+
+ repo.dirstate.__class__ = inotifydirstate
+
+cmdtable = {
+ 'debuginotify':
+ (debuginotify, [], _('hg debuginotify')),
+ '^inserve':
+ (serve,
+ [('d', 'daemon', None, _('run server in background')),
+ ('', 'daemon-pipefds', '',
+ _('used internally by daemon mode'), _('NUM')),
+ ('t', 'idle-timeout', '',
+ _('minutes to sit idle before exiting'), _('NUM')),
+ ('', 'pid-file', '',
+ _('name of file to write process ID to'), _('FILE'))],
+ _('hg inserve [OPTION]...')),
+ }
diff --git a/hgext/inotify/client.py b/hgext/inotify/client.py
new file mode 100644
index 0000000..0142b18
--- /dev/null
+++ b/hgext/inotify/client.py
@@ -0,0 +1,172 @@
+# client.py - inotify status client
+#
+# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
+# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
+# Copyright 2009 Nicolas Dumazet <nicdumz@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from mercurial.i18n import _
+import common, server
+import errno, os, socket, struct
+
+class QueryFailed(Exception):
+ pass
+
+def start_server(function):
+ """
+ Decorator: try to call the function; if that fails, try to (re)start
+ the inotify server. Raise QueryFailed if something goes wrong.
+ """
+ def decorated_function(self, *args):
+ try:
+ return function(self, *args)
+ except (OSError, socket.error), err:
+ autostart = self.ui.configbool('inotify', 'autostart', True)
+
+ if err.args[0] == errno.ECONNREFUSED:
+ self.ui.warn(_('inotify-client: found dead inotify server '
+ 'socket; removing it\n'))
+ os.unlink(os.path.join(self.root, '.hg', 'inotify.sock'))
+ if err.args[0] in (errno.ECONNREFUSED, errno.ENOENT) and autostart:
+ try:
+ try:
+ server.start(self.ui, self.dirstate, self.root,
+ dict(daemon=True, daemon_pipefds=''))
+ except server.AlreadyStartedException, inst:
+ # another process may have started its own
+ # inotify server while this one was starting.
+ self.ui.debug(str(inst))
+ except Exception, inst:
+ self.ui.warn(_('inotify-client: could not start inotify '
+ 'server: %s\n') % inst)
+ else:
+ try:
+ return function(self, *args)
+ except socket.error, err:
+ self.ui.warn(_('inotify-client: could not talk to new '
+ 'inotify server: %s\n') % err.args[-1])
+ elif err.args[0] in (errno.ECONNREFUSED, errno.ENOENT):
+ # silently ignore normal errors if autostart is False
+ self.ui.debug('(inotify server not running)\n')
+ else:
+ self.ui.warn(_('inotify-client: failed to contact inotify '
+ 'server: %s\n') % err.args[-1])
+
+ self.ui.traceback()
+ raise QueryFailed('inotify query failed')
+
+ return decorated_function
+
+
+class client(object):
+ def __init__(self, ui, repo):
+ self.ui = ui
+ self.dirstate = repo.dirstate
+ self.root = repo.root
+ self.sock = socket.socket(socket.AF_UNIX)
+
+ def _connect(self):
+ sockpath = os.path.join(self.root, '.hg', 'inotify.sock')
+ try:
+ self.sock.connect(sockpath)
+ except socket.error, err:
+ if err.args[0] == "AF_UNIX path too long":
+ sockpath = os.readlink(sockpath)
+ self.sock.connect(sockpath)
+ else:
+ raise
+
+ def _send(self, type, data):
+ """Sends protocol version number, and the data"""
+ self.sock.sendall(chr(common.version) + type + data)
+
+ self.sock.shutdown(socket.SHUT_WR)
+
+ def _receive(self, type):
+ """
+ Read data, check the version number, extract the headers,
+ and return a (data descriptor, header) tuple.
+ Raises QueryFailed on error.
+ """
+ cs = common.recvcs(self.sock)
+ try:
+ version = ord(cs.read(1))
+ except TypeError:
+ # empty answer, assume the server crashed
+ self.ui.warn(_('inotify-client: received empty answer from inotify '
+ 'server\n'))
+ raise QueryFailed('server crashed')
+
+ if version != common.version:
+ self.ui.warn(_('(inotify: received response from incompatible '
+ 'server version %d)\n') % version)
+ raise QueryFailed('incompatible server version')
+
+ readtype = cs.read(4)
+ if readtype != type:
+ self.ui.warn(_('(inotify: received \'%s\' response when expecting'
+ ' \'%s\')\n') % (readtype, type))
+ raise QueryFailed('wrong response type')
+
+ hdrfmt = common.resphdrfmts[type]
+ hdrsize = common.resphdrsizes[type]
+ try:
+ resphdr = struct.unpack(hdrfmt, cs.read(hdrsize))
+ except struct.error:
+ raise QueryFailed('unable to retrieve query response headers')
+
+ return cs, resphdr
+
+ def query(self, type, req):
+ self._connect()
+
+ self._send(type, req)
+
+ return self._receive(type)
+
+ @start_server
+ def statusquery(self, names, match, ignored, clean, unknown=True):
+
+ def genquery():
+ for n in names:
+ yield n
+ states = 'almrx!'
+ if ignored:
+ raise ValueError('this is insanity')
+ if clean:
+ states += 'c'
+ if unknown:
+ states += '?'
+ yield states
+
+ req = '\0'.join(genquery())
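+ # e.g. names=['foo.c'] with clean=False and unknown=True produces
+ # the hypothetical request 'foo.c\0almrx!?' (see the protocol notes
+ # in common.py)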
+
+ cs, resphdr = self.query('STAT', req)
+
+ def readnames(nbytes):
+ if nbytes:
+ names = cs.read(nbytes)
+ if names:
+ return filter(match, names.split('\0'))
+ return []
+ results = tuple(map(readnames, resphdr[:-1]))
+
+ if names:
+ nbytes = resphdr[-1]
+ vdirs = cs.read(nbytes)
+ if vdirs:
+ for vdir in vdirs.split('\0'):
+ match.dir(vdir)
+
+ return results
+
+ @start_server
+ def debugquery(self):
+ cs, resphdr = self.query('DBUG', '')
+
+ nbytes = resphdr[0]
+ names = cs.read(nbytes)
+ return names.split('\0')
diff --git a/hgext/inotify/common.py b/hgext/inotify/common.py
new file mode 100644
index 0000000..8a5ea1d
--- /dev/null
+++ b/hgext/inotify/common.py
@@ -0,0 +1,53 @@
+# common.py - inotify common protocol code
+#
+# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
+# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import cStringIO, socket, struct
+
+"""
+ Protocol between inotify clients and server:
+
+ Client sending query:
+ 1) send protocol version number
+ 2) send query type (string, 4 letters long)
+ 3) send query parameters:
+ - For STAT, N+1 \0-separated strings:
+ 1) N different names that need checking
+ 2) 1 string containing all the status types to match
+ - No parameter needed for DBUG
+
+ Server sending query answer:
+ 1) send protocol version number
+ 2) send query type
+ 3) send struct.pack'ed headers describing the length of the content:
+ e.g. for STAT, receive 9 integers describing the length of the
+ 9 \0-separated string lists to be read:
+ * one file list for each lmar!?ic status type
+ * one list containing the directories visited during lookup
+
+"""
+
+version = 3
+
+resphdrfmts = {
+ 'STAT': '>lllllllll', # status requests
+ 'DBUG': '>l' # debugging queries
+}
+resphdrsizes = dict((k, struct.calcsize(v))
+ for k, v in resphdrfmts.iteritems())
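+
+# Illustrative exchange under the protocol above (a sketch; the file name
+# is hypothetical). A client asking for the status of foo.c sends
+#   chr(version) + 'STAT' + 'foo.c\0almrx!?'
+# and the server answers with chr(version) + 'STAT', nine big-endian longs
+# ('>lllllllll') giving the byte length of each '\0'-separated result
+# list, followed by the lists themselves.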
+
+def recvcs(sock):
+ cs = cStringIO.StringIO()
+ s = True
+ try:
+ while s:
+ s = sock.recv(65536)
+ cs.write(s)
+ finally:
+ sock.shutdown(socket.SHUT_RD)
+ cs.seek(0)
+ return cs
diff --git a/hgext/inotify/linux/__init__.py b/hgext/inotify/linux/__init__.py
new file mode 100644
index 0000000..8a1bafd
--- /dev/null
+++ b/hgext/inotify/linux/__init__.py
@@ -0,0 +1,44 @@
+# __init__.py - low-level interfaces to the Linux inotify subsystem
+
+# Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
+
+# This library is free software; you can redistribute it and/or modify
+# it under the terms of version 2.1 of the GNU Lesser General Public
+# License, or any later version.
+
+'''Low-level interface to the Linux inotify subsystem.
+
+The inotify subsystem provides an efficient mechanism for file status
+monitoring and change notification.
+
+This package provides the low-level inotify system call interface and
+associated constants and helper functions.
+
+For a higher-level interface that remains highly efficient, use the
+inotify.watcher package.'''
+
+__author__ = "Bryan O'Sullivan <bos@serpentine.com>"
+
+from _inotify import *
+
+procfs_path = '/proc/sys/fs/inotify'
+
+def _read_procfs_value(name):
+ def read_value():
+ try:
+ fp = open(procfs_path + '/' + name)
+ r = int(fp.read())
+ fp.close()
+ return r
+ except (IOError, OSError):
+ # open() raises IOError, not OSError, when the procfs
+ # entry is missing
+ return None
+
+ read_value.__doc__ = '''Return the value of the %s setting from /proc.
+
+ If inotify is not enabled on this system, return None.''' % name
+
+ return read_value
+
+max_queued_events = _read_procfs_value('max_queued_events')
+max_user_instances = _read_procfs_value('max_user_instances')
+max_user_watches = _read_procfs_value('max_user_watches')
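+
+# Minimal usage sketch (values depend on the running kernel; None means
+# the inotify procfs entries are unavailable):
+#   limit = max_user_watches()
+#   if limit is not None:
+#       print 'per-user inotify watch limit:', limit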
diff --git a/hgext/inotify/linux/_inotify.c b/hgext/inotify/linux/_inotify.c
new file mode 100644
index 0000000..5e31b85
--- /dev/null
+++ b/hgext/inotify/linux/_inotify.c
@@ -0,0 +1,649 @@
+/*
+ * _inotify.c - Python extension interfacing to the Linux inotify subsystem
+ *
+ * Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of version 2.1 of the GNU Lesser General
+ * Public License or any later version.
+ */
+
+#include <Python.h>
+#include <alloca.h>
+#include <sys/inotify.h>
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#include <util.h>
+
+/* Variables used in the event string representation */
+static PyObject *join;
+static PyObject *er_wm;
+static PyObject *er_wmc;
+static PyObject *er_wmn;
+static PyObject *er_wmcn;
+
+static PyObject *init(PyObject *self, PyObject *args)
+{
+ PyObject *ret = NULL;
+ int fd = -1;
+
+ if (!PyArg_ParseTuple(args, ":init"))
+ goto bail;
+
+ Py_BEGIN_ALLOW_THREADS;
+ fd = inotify_init();
+ Py_END_ALLOW_THREADS;
+
+ if (fd == -1) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ goto bail;
+ }
+
+ ret = PyInt_FromLong(fd);
+ if (ret == NULL)
+ goto bail;
+
+ goto done;
+
+bail:
+ if (fd != -1)
+ close(fd);
+
+ Py_CLEAR(ret);
+
+done:
+ return ret;
+}
+
+PyDoc_STRVAR(
+ init_doc,
+ "init() -> fd\n"
+ "\n"
+ "Initialise an inotify instance.\n"
+ "Return a file descriptor associated with a new inotify event queue.");
+
+static PyObject *add_watch(PyObject *self, PyObject *args)
+{
+ PyObject *ret = NULL;
+ uint32_t mask;
+ int wd = -1;
+ char *path;
+ int fd;
+
+ if (!PyArg_ParseTuple(args, "isI:add_watch", &fd, &path, &mask))
+ goto bail;
+
+ Py_BEGIN_ALLOW_THREADS;
+ wd = inotify_add_watch(fd, path, mask);
+ Py_END_ALLOW_THREADS;
+
+ if (wd == -1) {
+ PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
+ goto bail;
+ }
+
+ ret = PyInt_FromLong(wd);
+ if (ret == NULL)
+ goto bail;
+
+ goto done;
+
+bail:
+ if (wd != -1)
+ inotify_rm_watch(fd, wd);
+
+ Py_CLEAR(ret);
+
+done:
+ return ret;
+}
+
+PyDoc_STRVAR(
+ add_watch_doc,
+ "add_watch(fd, path, mask) -> wd\n"
+ "\n"
+ "Add a watch to an inotify instance, or modify an existing watch.\n"
+ "\n"
+ " fd: file descriptor returned by init()\n"
+ " path: path to watch\n"
+ " mask: mask of events to watch for\n"
+ "\n"
+ "Return a unique numeric watch descriptor for the inotify instance\n"
+ "mapped by the file descriptor.");
+
+static PyObject *remove_watch(PyObject *self, PyObject *args)
+{
+ uint32_t wd;
+ int fd;
+ int r;
+
+ if (!PyArg_ParseTuple(args, "iI:remove_watch", &fd, &wd))
+ return NULL;
+
+ Py_BEGIN_ALLOW_THREADS;
+ r = inotify_rm_watch(fd, wd);
+ Py_END_ALLOW_THREADS;
+
+ if (r == -1) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+PyDoc_STRVAR(
+ remove_watch_doc,
+ "remove_watch(fd, wd)\n"
+ "\n"
+ " fd: file descriptor returned by init()\n"
+ " wd: watch descriptor returned by add_watch()\n"
+ "\n"
+ "Remove a watch associated with the watch descriptor wd from the\n"
+ "inotify instance associated with the file descriptor fd.\n"
+ "\n"
+ "Removing a watch causes an IN_IGNORED event to be generated for this\n"
+ "watch descriptor.");
+
+#define bit_name(x) {x, #x}
+
+static struct {
+ int bit;
+ const char *name;
+ PyObject *pyname;
+} bit_names[] = {
+ bit_name(IN_ACCESS),
+ bit_name(IN_MODIFY),
+ bit_name(IN_ATTRIB),
+ bit_name(IN_CLOSE_WRITE),
+ bit_name(IN_CLOSE_NOWRITE),
+ bit_name(IN_OPEN),
+ bit_name(IN_MOVED_FROM),
+ bit_name(IN_MOVED_TO),
+ bit_name(IN_CREATE),
+ bit_name(IN_DELETE),
+ bit_name(IN_DELETE_SELF),
+ bit_name(IN_MOVE_SELF),
+ bit_name(IN_UNMOUNT),
+ bit_name(IN_Q_OVERFLOW),
+ bit_name(IN_IGNORED),
+ bit_name(IN_ONLYDIR),
+ bit_name(IN_DONT_FOLLOW),
+ bit_name(IN_MASK_ADD),
+ bit_name(IN_ISDIR),
+ bit_name(IN_ONESHOT),
+ {0}
+};
+
+static PyObject *decode_mask(int mask)
+{
+ PyObject *ret = PyList_New(0);
+ int i;
+
+ if (ret == NULL)
+ goto bail;
+
+ for (i = 0; bit_names[i].bit; i++) {
+ if (mask & bit_names[i].bit) {
+ if (bit_names[i].pyname == NULL) {
+ bit_names[i].pyname = PyString_FromString(bit_names[i].name);
+ if (bit_names[i].pyname == NULL)
+ goto bail;
+ }
+ Py_INCREF(bit_names[i].pyname);
+ if (PyList_Append(ret, bit_names[i].pyname) == -1)
+ goto bail;
+ }
+ }
+
+ goto done;
+
+bail:
+ Py_CLEAR(ret);
+
+done:
+ return ret;
+}
+
+static PyObject *pydecode_mask(PyObject *self, PyObject *args)
+{
+ int mask;
+
+ if (!PyArg_ParseTuple(args, "i:decode_mask", &mask))
+ return NULL;
+
+ return decode_mask(mask);
+}
+
+PyDoc_STRVAR(
+ decode_mask_doc,
+ "decode_mask(mask) -> list_of_strings\n"
+ "\n"
+ "Decode an inotify mask value into a list of strings that give the\n"
+ "name of each bit set in the mask.");
+
+static char doc[] = "Low-level inotify interface wrappers.";
+
+static void define_const(PyObject *dict, const char *name, uint32_t val)
+{
+ PyObject *pyval = PyInt_FromLong(val);
+ PyObject *pyname = PyString_FromString(name);
+
+ if (!pyname || !pyval)
+ goto bail;
+
+ PyDict_SetItem(dict, pyname, pyval);
+
+bail:
+ Py_XDECREF(pyname);
+ Py_XDECREF(pyval);
+}
+
+static void define_consts(PyObject *dict)
+{
+ define_const(dict, "IN_ACCESS", IN_ACCESS);
+ define_const(dict, "IN_MODIFY", IN_MODIFY);
+ define_const(dict, "IN_ATTRIB", IN_ATTRIB);
+ define_const(dict, "IN_CLOSE_WRITE", IN_CLOSE_WRITE);
+ define_const(dict, "IN_CLOSE_NOWRITE", IN_CLOSE_NOWRITE);
+ define_const(dict, "IN_OPEN", IN_OPEN);
+ define_const(dict, "IN_MOVED_FROM", IN_MOVED_FROM);
+ define_const(dict, "IN_MOVED_TO", IN_MOVED_TO);
+
+ define_const(dict, "IN_CLOSE", IN_CLOSE);
+ define_const(dict, "IN_MOVE", IN_MOVE);
+
+ define_const(dict, "IN_CREATE", IN_CREATE);
+ define_const(dict, "IN_DELETE", IN_DELETE);
+ define_const(dict, "IN_DELETE_SELF", IN_DELETE_SELF);
+ define_const(dict, "IN_MOVE_SELF", IN_MOVE_SELF);
+ define_const(dict, "IN_UNMOUNT", IN_UNMOUNT);
+ define_const(dict, "IN_Q_OVERFLOW", IN_Q_OVERFLOW);
+ define_const(dict, "IN_IGNORED", IN_IGNORED);
+
+ define_const(dict, "IN_ONLYDIR", IN_ONLYDIR);
+ define_const(dict, "IN_DONT_FOLLOW", IN_DONT_FOLLOW);
+ define_const(dict, "IN_MASK_ADD", IN_MASK_ADD);
+ define_const(dict, "IN_ISDIR", IN_ISDIR);
+ define_const(dict, "IN_ONESHOT", IN_ONESHOT);
+ define_const(dict, "IN_ALL_EVENTS", IN_ALL_EVENTS);
+}
+
+struct event {
+ PyObject_HEAD
+ PyObject *wd;
+ PyObject *mask;
+ PyObject *cookie;
+ PyObject *name;
+};
+
+static PyObject *event_wd(PyObject *self, void *x)
+{
+ struct event *evt = (struct event *)self;
+ Py_INCREF(evt->wd);
+ return evt->wd;
+}
+
+static PyObject *event_mask(PyObject *self, void *x)
+{
+ struct event *evt = (struct event *)self;
+ Py_INCREF(evt->mask);
+ return evt->mask;
+}
+
+static PyObject *event_cookie(PyObject *self, void *x)
+{
+ struct event *evt = (struct event *)self;
+ Py_INCREF(evt->cookie);
+ return evt->cookie;
+}
+
+static PyObject *event_name(PyObject *self, void *x)
+{
+ struct event *evt = (struct event *)self;
+ Py_INCREF(evt->name);
+ return evt->name;
+}
+
+static struct PyGetSetDef event_getsets[] = {
+ {"wd", event_wd, NULL,
+ "watch descriptor"},
+ {"mask", event_mask, NULL,
+ "event mask"},
+ {"cookie", event_cookie, NULL,
+ "rename cookie, if rename-related event"},
+ {"name", event_name, NULL,
+ "file name"},
+ {NULL}
+};
+
+PyDoc_STRVAR(
+ event_doc,
+ "event: Structure describing an inotify event.");
+
+static PyObject *event_new(PyTypeObject *t, PyObject *a, PyObject *k)
+{
+ return (*t->tp_alloc)(t, 0);
+}
+
+static void event_dealloc(struct event *evt)
+{
+ Py_XDECREF(evt->wd);
+ Py_XDECREF(evt->mask);
+ Py_XDECREF(evt->cookie);
+ Py_XDECREF(evt->name);
+
+ Py_TYPE(evt)->tp_free(evt);
+}
+
+static PyObject *event_repr(struct event *evt)
+{
+ int cookie = evt->cookie == Py_None ? -1 : PyInt_AsLong(evt->cookie);
+ PyObject *ret = NULL, *pymasks = NULL, *pymask = NULL;
+ PyObject *tuple = NULL, *formatstr = NULL;
+
+ pymasks = decode_mask(PyInt_AsLong(evt->mask));
+ if (pymasks == NULL)
+ goto bail;
+
+ pymask = _PyString_Join(join, pymasks);
+ if (pymask == NULL)
+ goto bail;
+
+ if (evt->name != Py_None) {
+ if (cookie == -1) {
+ formatstr = er_wmn;
+ tuple = PyTuple_Pack(3, evt->wd, pymask, evt->name);
+ }
+ else {
+ formatstr = er_wmcn;
+ tuple = PyTuple_Pack(4, evt->wd, pymask,
+ evt->cookie, evt->name);
+ }
+ } else {
+ if (cookie == -1) {
+ formatstr = er_wm;
+ tuple = PyTuple_Pack(2, evt->wd, pymask);
+ }
+ else {
+ formatstr = er_wmc;
+ tuple = PyTuple_Pack(3, evt->wd, pymask, evt->cookie);
+ }
+ }
+
+ if (tuple == NULL)
+ goto bail;
+
+ ret = PyNumber_Remainder(formatstr, tuple);
+
+ if (ret == NULL)
+ goto bail;
+
+ goto done;
+bail:
+ Py_CLEAR(ret);
+
+done:
+ Py_XDECREF(pymask);
+ Py_XDECREF(pymasks);
+ Py_XDECREF(tuple);
+
+ return ret;
+}
+
+static PyTypeObject event_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "_inotify.event", /*tp_name*/
+ sizeof(struct event), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ (destructor)event_dealloc, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ (reprfunc)event_repr, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash */
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
+ event_doc, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ 0, /* tp_methods */
+ 0, /* tp_members */
+ event_getsets, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ event_new, /* tp_new */
+};
+
+PyObject *read_events(PyObject *self, PyObject *args)
+{
+ PyObject *ctor_args = NULL;
+ PyObject *pybufsize = NULL;
+ PyObject *ret = NULL;
+ int bufsize = 65536;
+ char *buf = NULL;
+ int nread, pos;
+ int fd;
+
+ if (!PyArg_ParseTuple(args, "i|O:read", &fd, &pybufsize))
+ goto bail;
+
+ if (pybufsize && pybufsize != Py_None)
+ bufsize = PyInt_AsLong(pybufsize);
+
+ ret = PyList_New(0);
+ if (ret == NULL)
+ goto bail;
+
+ if (bufsize <= 0) {
+ int r;
+
+ Py_BEGIN_ALLOW_THREADS;
+ r = ioctl(fd, FIONREAD, &bufsize);
+ Py_END_ALLOW_THREADS;
+
+ if (r == -1) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ goto bail;
+ }
+ if (bufsize == 0)
+ goto done;
+ }
+ else {
+ static long name_max;
+ static long name_fd = -1;
+ long min;
+
+ if (name_fd != fd) {
+ name_fd = fd;
+ Py_BEGIN_ALLOW_THREADS;
+ name_max = fpathconf(fd, _PC_NAME_MAX);
+ Py_END_ALLOW_THREADS;
+ }
+
+ min = sizeof(struct inotify_event) + name_max + 1;
+
+ if (bufsize < min) {
+ PyErr_Format(PyExc_ValueError,
+ "bufsize must be at least %d", (int)min);
+ goto bail;
+ }
+ }
+
+ buf = alloca(bufsize);
+
+ Py_BEGIN_ALLOW_THREADS;
+ nread = read(fd, buf, bufsize);
+ Py_END_ALLOW_THREADS;
+
+ if (nread == -1) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ goto bail;
+ }
+
+ ctor_args = PyTuple_New(0);
+
+ if (ctor_args == NULL)
+ goto bail;
+
+ pos = 0;
+
+ while (pos < nread) {
+ struct inotify_event *in = (struct inotify_event *)(buf + pos);
+ struct event *evt;
+ PyObject *obj;
+
+ obj = PyObject_CallObject((PyObject *)&event_type, ctor_args);
+
+ if (obj == NULL)
+ goto bail;
+
+ evt = (struct event *)obj;
+
+ evt->wd = PyInt_FromLong(in->wd);
+ evt->mask = PyInt_FromLong(in->mask);
+ if (in->mask & IN_MOVE)
+ evt->cookie = PyInt_FromLong(in->cookie);
+ else {
+ Py_INCREF(Py_None);
+ evt->cookie = Py_None;
+ }
+ if (in->len)
+ evt->name = PyString_FromString(in->name);
+ else {
+ Py_INCREF(Py_None);
+ evt->name = Py_None;
+ }
+
+ if (!evt->wd || !evt->mask || !evt->cookie || !evt->name)
+ goto mybail;
+
+ if (PyList_Append(ret, obj) == -1)
+ goto mybail;
+
+ pos += sizeof(struct inotify_event) + in->len;
+ continue;
+
+ mybail:
+ Py_CLEAR(evt->wd);
+ Py_CLEAR(evt->mask);
+ Py_CLEAR(evt->cookie);
+ Py_CLEAR(evt->name);
+ Py_DECREF(obj);
+
+ goto bail;
+ }
+
+ goto done;
+
+bail:
+ Py_CLEAR(ret);
+
+done:
+ Py_XDECREF(ctor_args);
+
+ return ret;
+}
+
+static int init_globals(void)
+{
+ join = PyString_FromString("|");
+ er_wm = PyString_FromString("event(wd=%d, mask=%s)");
+ er_wmn = PyString_FromString("event(wd=%d, mask=%s, name=%s)");
+ er_wmc = PyString_FromString("event(wd=%d, mask=%s, cookie=0x%x)");
+ er_wmcn = PyString_FromString("event(wd=%d, mask=%s, cookie=0x%x, name=%s)");
+
+ return join && er_wm && er_wmn && er_wmc && er_wmcn;
+}
+
+PyDoc_STRVAR(
+ read_doc,
+ "read(fd, bufsize[=65536]) -> list_of_events\n"
+ "\n"
+ "\nRead inotify events from a file descriptor.\n"
+ "\n"
+ " fd: file descriptor returned by init()\n"
+ " bufsize: size of buffer to read into, in bytes\n"
+ "\n"
+ "Return a list of event objects.\n"
+ "\n"
+ "If bufsize is > 0, block until events are available to be read.\n"
+ "Otherwise, immediately return all events that can be read without\n"
+ "blocking.");
+
+static PyMethodDef methods[] = {
+ {"init", init, METH_VARARGS, init_doc},
+ {"add_watch", add_watch, METH_VARARGS, add_watch_doc},
+ {"remove_watch", remove_watch, METH_VARARGS, remove_watch_doc},
+ {"read", read_events, METH_VARARGS, read_doc},
+ {"decode_mask", pydecode_mask, METH_VARARGS, decode_mask_doc},
+ {NULL},
+};
+
+#ifdef IS_PY3K
+static struct PyModuleDef _inotify_module = {
+ PyModuleDef_HEAD_INIT,
+ "_inotify",
+ doc,
+ -1,
+ methods
+};
+
+PyMODINIT_FUNC PyInit__inotify(void)
+{
+ PyObject *mod, *dict;
+
+ mod = PyModule_Create(&_inotify_module);
+
+ if (mod == NULL)
+ return NULL;
+
+ /* the original Python 3 branch returned void here and never
+ * readied event_type; both are needed for a valid module init */
+ if (PyType_Ready(&event_type) == -1)
+ return NULL;
+
+ if (!init_globals())
+ return NULL;
+
+ dict = PyModule_GetDict(mod);
+
+ if (dict)
+ define_consts(dict);
+
+ return mod;
+}
+#else
+void init_inotify(void)
+{
+ PyObject *mod, *dict;
+
+ if (PyType_Ready(&event_type) == -1)
+ return;
+
+ if (!init_globals())
+ return;
+
+ mod = Py_InitModule3("_inotify", methods, doc);
+
+ dict = PyModule_GetDict(mod);
+
+ if (dict)
+ define_consts(dict);
+}
+#endif
diff --git a/hgext/inotify/linux/watcher.py b/hgext/inotify/linux/watcher.py
new file mode 100644
index 0000000..cd62006
--- /dev/null
+++ b/hgext/inotify/linux/watcher.py
@@ -0,0 +1,335 @@
+# watcher.py - high-level interfaces to the Linux inotify subsystem
+
+# Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
+
+# This library is free software; you can redistribute it and/or modify
+# it under the terms of version 2.1 of the GNU Lesser General Public
+# License, or any later version.
+
+'''High-level interfaces to the Linux inotify subsystem.
+
+The inotify subsystem provides an efficient mechanism for file status
+monitoring and change notification.
+
+The watcher class hides the low-level details of the inotify
+interface, and provides a Pythonic wrapper around it. It generates
+events that provide somewhat more information than raw inotify makes
+available.
+
+The autowatcher class is more useful, as it automatically watches
+newly-created directories on your behalf.'''
+
+__author__ = "Bryan O'Sullivan <bos@serpentine.com>"
+
+import _inotify as inotify
+import array
+import errno
+import fcntl
+import os
+import termios
+
+
+class event(object):
+ '''Derived inotify event class.
+
+ The following fields are available:
+
+ mask: event mask, indicating what kind of event this is
+
+ cookie: rename cookie, if a rename-related event
+
+ path: path of the directory in which the event occurred
+
+ name: name of the directory entry to which the event occurred
+ (may be None if the event happened to a watched directory)
+
+ fullpath: complete path at which the event occurred
+
+ wd: watch descriptor that triggered this event'''
+
+ __slots__ = (
+ 'cookie',
+ 'fullpath',
+ 'mask',
+ 'name',
+ 'path',
+ 'raw',
+ 'wd',
+ )
+
+ def __init__(self, raw, path):
+ self.path = path
+ self.raw = raw
+ if raw.name:
+ self.fullpath = path + '/' + raw.name
+ else:
+ self.fullpath = path
+
+ self.wd = raw.wd
+ self.mask = raw.mask
+ self.cookie = raw.cookie
+ self.name = raw.name
+
+ def __repr__(self):
+ r = repr(self.raw)
+ return 'event(path=' + repr(self.path) + ', ' + r[r.find('(')+1:]
+
+
+_event_props = {
+ 'access': 'File was accessed',
+ 'modify': 'File was modified',
+ 'attrib': 'Attribute of a directory entry was changed',
+ 'close_write': 'File was closed after being written to',
+ 'close_nowrite': 'File was closed without being written to',
+ 'open': 'File was opened',
+ 'moved_from': 'Directory entry was renamed from this name',
+ 'moved_to': 'Directory entry was renamed to this name',
+ 'create': 'Directory entry was created',
+ 'delete': 'Directory entry was deleted',
+ 'delete_self': 'The watched directory entry was deleted',
+ 'move_self': 'The watched directory entry was renamed',
+ 'unmount': 'Directory was unmounted, and can no longer be watched',
+ 'q_overflow': 'Kernel dropped events due to queue overflow',
+ 'ignored': 'Directory entry is no longer being watched',
+ 'isdir': 'Event occurred on a directory',
+ }
+
+for k, v in _event_props.iteritems():
+ mask = getattr(inotify, 'IN_' + k.upper())
+ def getter(self, mask=mask):
+ # bind mask at definition time; a plain closure would leave every
+ # property testing against the loop's final mask value
+ return self.mask & mask
+ getter.__name__ = k
+ getter.__doc__ = v
+ setattr(event, k, property(getter, doc=v))
+
+del _event_props
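+
+# Illustrative sketch: with the properties installed above, callers can
+# test event kinds by attribute instead of masking by hand, e.g.
+#   for evt in w.read():
+#       if evt.create and evt.isdir:
+#           print 'new directory', evt.fullpath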
+
+
+class watcher(object):
+ '''Provide a Pythonic interface to the low-level inotify API.
+
+ Also adds derived information to each event that is not available
+ through the normal inotify API, such as directory name.'''
+
+ __slots__ = (
+ 'fd',
+ '_paths',
+ '_wds',
+ )
+
+ def __init__(self):
+ '''Create a new inotify instance.'''
+
+ self.fd = inotify.init()
+ self._paths = {}
+ self._wds = {}
+
+ def fileno(self):
+ '''Return the file descriptor this watcher uses.
+
+ Useful for passing to select and poll.'''
+
+ return self.fd
+
+ def add(self, path, mask):
+ '''Add or modify a watch.
+
+ Return the watch descriptor added or modified.'''
+
+ path = os.path.normpath(path)
+ wd = inotify.add_watch(self.fd, path, mask)
+ self._paths[path] = wd, mask
+ self._wds[wd] = path, mask
+ return wd
+
+ def remove(self, wd):
+ '''Remove the given watch.'''
+
+ inotify.remove_watch(self.fd, wd)
+ self._remove(wd)
+
+ def _remove(self, wd):
+ path_mask = self._wds.pop(wd, None)
+ if path_mask is not None:
+ self._paths.pop(path_mask[0])
+
+ def path(self, path):
+ '''Return a (watch descriptor, event mask) pair for the given path.
+
+ If the path is not being watched, return None.'''
+
+ return self._paths.get(path)
+
+ def wd(self, wd):
+ '''Return a (path, event mask) pair for the given watch descriptor.
+
+ If the watch descriptor is not valid or not associated with
+ this watcher, return None.'''
+
+ return self._wds.get(wd)
+
+ def read(self, bufsize=None):
+ '''Read a list of queued inotify events.
+
+ If bufsize is zero, only return those events that can be read
+ immediately without blocking. Otherwise, block until events are
+ available.'''
+
+ events = []
+ for evt in inotify.read(self.fd, bufsize):
+ events.append(event(evt, self._wds[evt.wd][0]))
+ if evt.mask & inotify.IN_IGNORED:
+ self._remove(evt.wd)
+ elif evt.mask & inotify.IN_UNMOUNT:
+ self.close()
+ return events
+
+ def close(self):
+ '''Shut down this watcher.
+
+ All subsequent method calls are likely to raise exceptions.'''
+
+ os.close(self.fd)
+ self.fd = None
+ self._paths = None
+ self._wds = None
+
+ def __len__(self):
+ '''Return the number of active watches.'''
+
+ return len(self._paths)
+
+ def __iter__(self):
+ '''Yield a (path, watch descriptor, event mask) tuple for each
+ entry being watched.'''
+
+ for path, (wd, mask) in self._paths.iteritems():
+ yield path, wd, mask
+
+ def __del__(self):
+ if self.fd is not None:
+ os.close(self.fd)
+
+ ignored_errors = [errno.ENOENT, errno.EPERM, errno.ENOTDIR]
+
+ def add_iter(self, path, mask, onerror=None):
+ '''Add or modify watches over path and its subdirectories.
+
+ Yield each added or modified watch descriptor.
+
+ To ensure that this method runs to completion, you must
+ iterate over all of its results, even if you do not care what
+ they are. For example:
+
+ for wd in w.add_iter(path, mask):
+ pass
+
+ By default, errors are ignored. If optional arg "onerror" is
+ specified, it should be a function; it will be called with one
+ argument, an OSError instance. It can report the error to
+ continue with the walk, or raise the exception to abort the
+ walk.'''
+
+ # Add the IN_ONLYDIR flag to the event mask, to avoid a possible
+ # race when adding a subdirectory. In the time between the
+ # event being queued by the kernel and us processing it, the
+ # directory may have been deleted, or replaced with a different
+ # kind of entry with the same name.
+
+ submask = mask | inotify.IN_ONLYDIR
+
+ try:
+ yield self.add(path, mask)
+ except OSError, err:
+ if onerror and err.errno not in self.ignored_errors:
+ onerror(err)
+ for root, dirs, names in os.walk(path, topdown=False, onerror=onerror):
+ for d in dirs:
+ try:
+ yield self.add(root + '/' + d, submask)
+ except OSError, err:
+ if onerror and err.errno not in self.ignored_errors:
+ onerror(err)
+
+ def add_all(self, path, mask, onerror=None):
+ '''Add or modify watches over path and its subdirectories.
+
+ Return a list of added or modified watch descriptors.
+
+ By default, errors are ignored. If optional arg "onerror" is
+ specified, it should be a function; it will be called with one
+ argument, an OSError instance. It can report the error to
+ continue with the walk, or raise the exception to abort the
+ walk.'''
+
+ return [w for w in self.add_iter(path, mask, onerror)]
+
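+# Minimal usage sketch for watcher (hypothetical, not part of the original
+# module; assumes a readable directory '/tmp'):
+#   w = watcher()
+#   w.add('/tmp', inotify.IN_CREATE | inotify.IN_DELETE)
+#   for evt in w.read():
+#       print evt.fullpath, inotify.decode_mask(evt.mask)
+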
+
+class autowatcher(watcher):
+ '''watcher class that automatically watches newly created directories.'''
+
+ __slots__ = (
+ 'addfilter',
+ )
+
+ def __init__(self, addfilter=None):
+ '''Create a new inotify instance.
+
+ This instance will automatically watch newly created
+ directories.
+
+ If the optional addfilter parameter is not None, it must be a
+ callable that takes one parameter. It will be called each time
+ a directory is about to be automatically watched. If it returns
+ True, the directory will be watched if it still exists;
+ otherwise, it will be skipped.'''
+
+ super(autowatcher, self).__init__()
+ self.addfilter = addfilter
+
+ _dir_create_mask = inotify.IN_ISDIR | inotify.IN_CREATE
+
+ def read(self, bufsize=None):
+ events = super(autowatcher, self).read(bufsize)
+ for evt in events:
+ if evt.mask & self._dir_create_mask == self._dir_create_mask:
+ if self.addfilter is None or self.addfilter(evt):
+ parentmask = self._wds[evt.wd][1]
+ # See note about race avoidance via IN_ONLYDIR above.
+ mask = parentmask | inotify.IN_ONLYDIR
+ try:
+ self.add_all(evt.fullpath, mask)
+ except OSError, err:
+ if err.errno not in self.ignored_errors:
+ raise
+ return events
+
+
+class threshold(object):
+ '''Class that indicates whether a file descriptor has reached a
+ threshold of readable bytes available.
+
+ This class is not thread-safe.'''
+
+ __slots__ = (
+ 'fd',
+ 'threshold',
+ '_iocbuf',
+ )
+
+ def __init__(self, fd, threshold=1024):
+ self.fd = fd
+ self.threshold = threshold
+ self._iocbuf = array.array('i', [0])
+
+ def readable(self):
+ '''Return the number of bytes readable on this file descriptor.'''
+
+ fcntl.ioctl(self.fd, termios.FIONREAD, self._iocbuf, True)
+ return self._iocbuf[0]
+
+ def __call__(self):
+ '''Indicate whether the number of readable bytes has met or
+ exceeded the threshold.'''
+
+ return self.readable() >= self.threshold
diff --git a/hgext/inotify/linuxserver.py b/hgext/inotify/linuxserver.py
new file mode 100644
index 0000000..a92b540
--- /dev/null
+++ b/hgext/inotify/linuxserver.py
@@ -0,0 +1,444 @@
+# linuxserver.py - inotify status server for linux
+#
+# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
+# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from mercurial.i18n import _
+from mercurial import osutil, util, error
+import server
+import errno, os, select, stat, sys, time
+
+try:
+ import linux as inotify
+ from linux import watcher
+except ImportError:
+ raise
+
+def walkrepodirs(dirstate, absroot):
+ '''Iterate over all subdirectories of this repo.
+ Exclude the .hg directory, any nested repos, and ignored dirs.'''
+ def walkit(dirname, top):
+ fullpath = server.join(absroot, dirname)
+ try:
+ for name, kind in osutil.listdir(fullpath):
+ if kind == stat.S_IFDIR:
+ if name == '.hg':
+ if not top:
+ return
+ else:
+ d = server.join(dirname, name)
+ if dirstate._ignore(d):
+ continue
+ for subdir in walkit(d, False):
+ yield subdir
+ except OSError, err:
+ if err.errno not in server.walk_ignored_errors:
+ raise
+ yield fullpath
+
+ return walkit('', True)
+
+def _explain_watch_limit(ui, dirstate, rootabs):
+ path = '/proc/sys/fs/inotify/max_user_watches'
+ try:
+ limit = int(util.readfile(path))
+ except IOError, err:
+ if err.errno != errno.ENOENT:
+ raise
+ raise util.Abort(_('this system does not seem to '
+ 'support inotify'))
+ ui.warn(_('*** the current per-user limit on the number '
+ 'of inotify watches is %s\n') % limit)
+ ui.warn(_('*** this limit is too low to watch every '
+ 'directory in this repository\n'))
+ ui.warn(_('*** counting directories: '))
+ ndirs = len(list(walkrepodirs(dirstate, rootabs)))
+ ui.warn(_('found %d\n') % ndirs)
+ newlimit = min(limit, 1024)
+ while newlimit < ((limit + ndirs) * 1.1):
+ newlimit *= 2
+ ui.warn(_('*** to raise the limit from %d to %d (run as root):\n') %
+ (limit, newlimit))
+ ui.warn(_('*** echo %d > %s\n') % (newlimit, path))
+ raise util.Abort(_('cannot watch %s until inotify watch limit is raised')
+ % rootabs)
+
+class pollable(object):
+ """
+ Interface to support polling.
+ The file descriptor returned by fileno() is registered to a polling
+ object.
+ Usage:
+ Every tick, check if an event has happened since the last tick:
+ * If yes, call handle_pollevents
+ * If no, call handle_timeout
+ """
+ poll_events = select.POLLIN
+ instances = {}
+ poll = select.poll()
+
+ def fileno(self):
+ raise NotImplementedError
+
+ def handle_pollevents(self, events):
+ raise NotImplementedError
+
+ def handle_timeout(self):
+ raise NotImplementedError
+
+ def shutdown(self):
+ raise NotImplementedError
+
+ def register(self, timeout):
+ fd = self.fileno()
+
+ pollable.poll.register(fd, pollable.poll_events)
+ pollable.instances[fd] = self
+
+ self.registered = True
+ self.timeout = timeout
+
+ def unregister(self):
+ pollable.poll.unregister(self)
+ self.registered = False
+
+ @classmethod
+ def run(cls):
+ while True:
+ timeout = None
+ timeobj = None
+ for obj in cls.instances.itervalues():
+ if obj.timeout is not None and (timeout is None
+ or obj.timeout < timeout):
+ timeout, timeobj = obj.timeout, obj
+ try:
+ events = cls.poll.poll(timeout)
+ except select.error, err:
+ if err.args[0] == errno.EINTR:
+ continue
+ raise
+ if events:
+ by_fd = {}
+ for fd, event in events:
+ by_fd.setdefault(fd, []).append(event)
+
+ for fd, events in by_fd.iteritems():
+ cls.instances[fd].handle_pollevents(events)
+
+ elif timeobj:
+ timeobj.handle_timeout()
+
+def eventaction(code):
+ """
+ Decorator to help handle events in repowatcher
+ """
+ def decorator(f):
+ def wrapper(self, wpath):
+ if code == 'm' and wpath in self.lastevent and \
+ self.lastevent[wpath] in 'cm':
+ return
+ self.lastevent[wpath] = code
+ self.timeout = 250
+
+ f(self, wpath)
+
+ wrapper.func_name = f.func_name
+ return wrapper
+ return decorator
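+
+# Illustrative effect (a sketch): two rapid modify events on the same path
+# collapse into a single update, because the second @eventaction('m') call
+# sees lastevent[wpath] already set to 'c' or 'm' and returns early.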
+
+class repowatcher(server.repowatcher, pollable):
+ """
+ Watches inotify events
+ """
+ mask = (
+ inotify.IN_ATTRIB |
+ inotify.IN_CREATE |
+ inotify.IN_DELETE |
+ inotify.IN_DELETE_SELF |
+ inotify.IN_MODIFY |
+ inotify.IN_MOVED_FROM |
+ inotify.IN_MOVED_TO |
+ inotify.IN_MOVE_SELF |
+ inotify.IN_ONLYDIR |
+ inotify.IN_UNMOUNT |
+ 0)
+
+ def __init__(self, ui, dirstate, root):
+ server.repowatcher.__init__(self, ui, dirstate, root)
+
+ self.lastevent = {}
+ self.dirty = False
+ try:
+ self.watcher = watcher.watcher()
+ except OSError, err:
+ raise util.Abort(_('inotify service not available: %s') %
+ err.strerror)
+ self.threshold = watcher.threshold(self.watcher)
+ self.fileno = self.watcher.fileno
+ self.register(timeout=None)
+
+ self.handle_timeout()
+ self.scan()
+
+ def event_time(self):
+ last = self.last_event
+ now = time.time()
+ self.last_event = now
+
+ if last is None:
+ return 'start'
+ delta = now - last
+ if delta < 5:
+ return '+%.3f' % delta
+ if delta < 50:
+ return '+%.2f' % delta
+ return '+%.1f' % delta
+
+ def add_watch(self, path, mask):
+ if not path:
+ return
+ if self.watcher.path(path) is None:
+ if self.ui.debugflag:
+ self.ui.note(_('watching %r\n') % path[self.prefixlen:])
+ try:
+ self.watcher.add(path, mask)
+ except OSError, err:
+ if err.errno in (errno.ENOENT, errno.ENOTDIR):
+ return
+ if err.errno != errno.ENOSPC:
+ raise
+ _explain_watch_limit(self.ui, self.dirstate, self.wprefix)
+
+ def setup(self):
+ self.ui.note(_('watching directories under %r\n') % self.wprefix)
+ self.add_watch(self.wprefix + '.hg', inotify.IN_DELETE)
+
+ def scan(self, topdir=''):
+ ds = self.dirstate._map.copy()
+ self.add_watch(server.join(self.wprefix, topdir), self.mask)
+ for root, dirs, files in server.walk(self.dirstate, self.wprefix,
+ topdir):
+ for d in dirs:
+ self.add_watch(server.join(root, d), self.mask)
+ wroot = root[self.prefixlen:]
+ for fn in files:
+ wfn = server.join(wroot, fn)
+ self.updatefile(wfn, self.getstat(wfn))
+ ds.pop(wfn, None)
+ wtopdir = topdir
+ if wtopdir and wtopdir[-1] != '/':
+ wtopdir += '/'
+ for wfn, state in ds.iteritems():
+ if not wfn.startswith(wtopdir):
+ continue
+ try:
+ st = self.stat(wfn)
+ except OSError:
+ status = state[0]
+ self.deletefile(wfn, status)
+ else:
+ self.updatefile(wfn, st)
+ self.check_deleted('!')
+ self.check_deleted('r')
+
+ @eventaction('c')
+ def created(self, wpath):
+ if wpath == '.hgignore':
+ self.update_hgignore()
+ try:
+ st = self.stat(wpath)
+ if stat.S_ISREG(st[0]) or stat.S_ISLNK(st[0]):
+ self.updatefile(wpath, st)
+ except OSError:
+ pass
+
+ @eventaction('m')
+ def modified(self, wpath):
+ if wpath == '.hgignore':
+ self.update_hgignore()
+ try:
+ st = self.stat(wpath)
+ if stat.S_ISREG(st[0]):
+ if self.dirstate[wpath] in 'lmn':
+ self.updatefile(wpath, st)
+ except OSError:
+ pass
+
+ @eventaction('d')
+ def deleted(self, wpath):
+ if wpath == '.hgignore':
+ self.update_hgignore()
+ elif wpath.startswith('.hg/'):
+ return
+
+ self.deletefile(wpath, self.dirstate[wpath])
+
+ def process_create(self, wpath, evt):
+ if self.ui.debugflag:
+ self.ui.note(_('%s event: created %s\n') %
+ (self.event_time(), wpath))
+
+ if evt.mask & inotify.IN_ISDIR:
+ self.scan(wpath)
+ else:
+ self.created(wpath)
+
+ def process_delete(self, wpath, evt):
+ if self.ui.debugflag:
+ self.ui.note(_('%s event: deleted %s\n') %
+ (self.event_time(), wpath))
+
+ if evt.mask & inotify.IN_ISDIR:
+ tree = self.tree.dir(wpath)
+ todelete = [wfn for wfn, ignore in tree.walk('?')]
+ for fn in todelete:
+ self.deletefile(fn, '?')
+ self.scan(wpath)
+ else:
+ self.deleted(wpath)
+
+ def process_modify(self, wpath, evt):
+ if self.ui.debugflag:
+ self.ui.note(_('%s event: modified %s\n') %
+ (self.event_time(), wpath))
+
+ if not (evt.mask & inotify.IN_ISDIR):
+ self.modified(wpath)
+
+ def process_unmount(self, evt):
+ self.ui.warn(_('filesystem containing %s was unmounted\n') %
+ evt.fullpath)
+ sys.exit(0)
+
+ def handle_pollevents(self, events):
+ if self.ui.debugflag:
+ self.ui.note(_('%s readable: %d bytes\n') %
+ (self.event_time(), self.threshold.readable()))
+ if not self.threshold():
+ if self.registered:
+ if self.ui.debugflag:
+ self.ui.note(_('%s below threshold - unhooking\n') %
+ (self.event_time()))
+ self.unregister()
+ self.timeout = 250
+ else:
+ self.read_events()
+
+ def read_events(self, bufsize=None):
+ events = self.watcher.read(bufsize)
+ if self.ui.debugflag:
+ self.ui.note(_('%s reading %d events\n') %
+ (self.event_time(), len(events)))
+ for evt in events:
+ if evt.fullpath == self.wprefix[:-1]:
+ # events on the root of the repository
+ # itself, e.g. permission changes or repository move
+ continue
+ assert evt.fullpath.startswith(self.wprefix)
+ wpath = evt.fullpath[self.prefixlen:]
+
+ # paths have been normalized, wpath never ends with a '/'
+
+ if wpath.startswith('.hg/') and evt.mask & inotify.IN_ISDIR:
+ # ignore subdirectories of .hg/ (merge, patches...)
+ continue
+ if wpath == ".hg/wlock":
+ if evt.mask & inotify.IN_DELETE:
+ self.dirstate.invalidate()
+ self.dirty = False
+ self.scan()
+ elif evt.mask & inotify.IN_CREATE:
+ self.dirty = True
+ else:
+ if self.dirty:
+ continue
+
+ if evt.mask & inotify.IN_UNMOUNT:
+ self.process_unmount(evt)
+ elif evt.mask & (inotify.IN_MODIFY | inotify.IN_ATTRIB):
+ self.process_modify(wpath, evt)
+ elif evt.mask & (inotify.IN_DELETE | inotify.IN_DELETE_SELF |
+ inotify.IN_MOVED_FROM):
+ self.process_delete(wpath, evt)
+ elif evt.mask & (inotify.IN_CREATE | inotify.IN_MOVED_TO):
+ self.process_create(wpath, evt)
+
+ self.lastevent.clear()
+
+ def handle_timeout(self):
+ if not self.registered:
+ if self.ui.debugflag:
+ self.ui.note(_('%s hooking back up with %d bytes readable\n') %
+ (self.event_time(), self.threshold.readable()))
+ self.read_events(0)
+ self.register(timeout=None)
+
+ self.timeout = None
+
+ def shutdown(self):
+ self.watcher.close()
+
+ def debug(self):
+ """
+ Returns a sorted list of the relative paths currently watched,
+ for debugging purposes.
+ """
+ return sorted(entry[0][self.prefixlen:] for entry in self.watcher)
+
+class socketlistener(server.socketlistener, pollable):
+ """
+ Listens for client queries on unix socket inotify.sock
+ """
+ def __init__(self, ui, root, repowatcher, timeout):
+ server.socketlistener.__init__(self, ui, root, repowatcher, timeout)
+ self.register(timeout=timeout)
+
+ def handle_timeout(self):
+ raise server.TimeoutException
+
+ def handle_pollevents(self, events):
+ for e in events:
+ self.accept_connection()
+
+ def shutdown(self):
+ self.sock.close()
+ try:
+ os.unlink(self.sockpath)
+ if self.realsockpath:
+ os.unlink(self.realsockpath)
+ os.rmdir(os.path.dirname(self.realsockpath))
+ except OSError, err:
+ if err.errno != errno.ENOENT:
+ raise
+
+ def answer_stat_query(self, cs):
+ if self.repowatcher.timeout:
+ # We got a query while a rescan is pending. Make sure we
+ # rescan before responding, or we could give back a wrong
+ # answer.
+ self.repowatcher.handle_timeout()
+ return server.socketlistener.answer_stat_query(self, cs)
+
+class master(object):
+ def __init__(self, ui, dirstate, root, timeout=None):
+ self.ui = ui
+ self.repowatcher = repowatcher(ui, dirstate, root)
+ self.socketlistener = socketlistener(ui, root, self.repowatcher,
+ timeout)
+
+ def shutdown(self):
+ for obj in pollable.instances.itervalues():
+ try:
+ obj.shutdown()
+ except error.SignalInterrupt:
+ pass
+
+ def run(self):
+ self.repowatcher.setup()
+ self.ui.note(_('finished setup\n'))
+ if os.getenv('TIME_STARTUP'):
+ sys.exit(0)
+ pollable.run()
diff --git a/hgext/inotify/server.py b/hgext/inotify/server.py
new file mode 100644
index 0000000..b654b17
--- /dev/null
+++ b/hgext/inotify/server.py
@@ -0,0 +1,492 @@
+# server.py - common entry point for inotify status server
+#
+# Copyright 2009 Nicolas Dumazet <nicdumz@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from mercurial.i18n import _
+from mercurial import cmdutil, osutil, util
+import common
+
+import errno
+import os
+import socket
+import stat
+import struct
+import sys
+import tempfile
+
+class AlreadyStartedException(Exception):
+ pass
+class TimeoutException(Exception):
+ pass
+
+def join(a, b):
+ if a:
+ if a[-1] == '/':
+ return a + b
+ return a + '/' + b
+ return b
+
+def split(path):
+ c = path.rfind('/')
+ if c == -1:
+ return '', path
+ return path[:c], path[c + 1:]
+
+walk_ignored_errors = (errno.ENOENT, errno.ENAMETOOLONG)
+
+def walk(dirstate, absroot, root):
+ '''Like os.walk, but only yields regular files.'''
+
+ # This function is critical to performance during startup.
+
+ def walkit(root, reporoot):
+ files, dirs = [], []
+
+ try:
+ fullpath = join(absroot, root)
+ for name, kind in osutil.listdir(fullpath):
+ if kind == stat.S_IFDIR:
+ if name == '.hg':
+ if not reporoot:
+ return
+ else:
+ dirs.append(name)
+ path = join(root, name)
+ if dirstate._ignore(path):
+ continue
+ for result in walkit(path, False):
+ yield result
+ elif kind in (stat.S_IFREG, stat.S_IFLNK):
+ files.append(name)
+ yield fullpath, dirs, files
+
+ except OSError, err:
+ if err.errno == errno.ENOTDIR:
+ # fullpath was a directory, but has since been replaced
+ # by a file.
+ yield fullpath, dirs, files
+ elif err.errno not in walk_ignored_errors:
+ raise
+
+ return walkit(root, root == '')
+
+class directory(object):
+ """
+ Represents a directory
+
+ * path is the relative path from repo root to this directory
+ * files is a dict listing the files in this directory
+ - keys are file names
+ - values are file statuses
+ * dirs is a dict listing the subdirectories
+ - keys are subdirectory names
+ - values are directory objects
+ """
+ def __init__(self, relpath=''):
+ self.path = relpath
+ self.files = {}
+ self.dirs = {}
+
+ def dir(self, relpath):
+ """
+ Returns the directory contained at the relative path relpath.
+ Creates the intermediate directories if necessary.
+ """
+ if not relpath:
+ return self
+ l = relpath.split('/')
+ ret = self
+ while l:
+ next = l.pop(0)
+ try:
+ ret = ret.dirs[next]
+ except KeyError:
+ d = directory(join(ret.path, next))
+ ret.dirs[next] = d
+ ret = d
+ return ret
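+
+ # Illustrative sketch: tree.dir('a/b') creates or returns the node for
+ # directory 'a/b', so tree.dir('a/b').files['c'] = 'n' records a
+ # normal-status file at the repo-relative path 'a/b/c'.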
+
+ def walk(self, states, visited=None):
+ """
+ Yield (filename, status) pairs for items in the tree
+ whose status is in states.
+ Filenames are relative to the repo root.
+ """
+ for file, st in self.files.iteritems():
+ if st in states:
+ yield join(self.path, file), st
+ for dir in self.dirs.itervalues():
+ if visited is not None:
+ visited.add(dir.path)
+ for e in dir.walk(states):
+ yield e
+
+ def lookup(self, states, path, visited):
+ """
+ Yield root-relative filenames that match path and whose
+ status is in states:
+ * if path is a file, yield path
+ * if path is a directory, yield directory files
+ * if path is not tracked, yield nothing
+ """
+ if path[-1] == '/':
+ path = path[:-1]
+
+ paths = path.split('/')
+
+ # we need to check separately for last node
+ last = paths.pop()
+
+ tree = self
+ try:
+ for dir in paths:
+ tree = tree.dirs[dir]
+ except KeyError:
+ # path is not tracked
+ visited.add(tree.path)
+ return
+
+ try:
+ # if path is a directory, walk it
+ target = tree.dirs[last]
+ visited.add(target.path)
+ for file, st in target.walk(states, visited):
+ yield file
+ except KeyError:
+ try:
+ if tree.files[last] in states:
+ # path is a file
+ visited.add(tree.path)
+ yield path
+ except KeyError:
+ # path is not tracked
+ pass
+
+class repowatcher(object):
+ """
+ Watches inotify events
+ """
+ statuskeys = 'almr!?'
+
+ def __init__(self, ui, dirstate, root):
+ self.ui = ui
+ self.dirstate = dirstate
+
+ self.wprefix = join(root, '')
+ self.prefixlen = len(self.wprefix)
+
+ self.tree = directory()
+ self.statcache = {}
+ self.statustrees = dict([(s, directory()) for s in self.statuskeys])
+
+ self.ds_info = self.dirstate_info()
+
+ self.last_event = None
+
+
+ def handle_timeout(self):
+ pass
+
+ def dirstate_info(self):
+ try:
+ st = os.lstat(self.wprefix + '.hg/dirstate')
+ return st.st_mtime, st.st_ino
+ except OSError, err:
+ if err.errno != errno.ENOENT:
+ raise
+ return 0, 0
+
+ def filestatus(self, fn, st):
+ try:
+ type_, mode, size, time = self.dirstate._map[fn][:4]
+ except KeyError:
+ type_ = '?'
+ if type_ == 'n':
+ st_mode, st_size, st_mtime = st
+ if size == -1:
+ return 'l'
+ if size and (size != st_size or (mode ^ st_mode) & 0100):
+ return 'm'
+ if time != int(st_mtime):
+ return 'l'
+ return 'n'
+ if type_ == '?' and self.dirstate._dirignore(fn):
+ # we must check not only if the file is ignored, but if any part
+ # of its path match an ignore pattern
+ return 'i'
+ return type_
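+
+ # The letters above mirror dirstate/"hg status" codes: n=normal,
+ # m=modified, l=lookup (a content comparison is still needed),
+ # i=ignored, ?=unknown.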
+
+ def updatefile(self, wfn, osstat):
+ '''
+ update the file entry of an existing file.
+
+ osstat: (mode, size, time) tuple, as returned by os.lstat(wfn)
+ '''
+
+ self._updatestatus(wfn, self.filestatus(wfn, osstat))
+
+ def deletefile(self, wfn, oldstatus):
+ '''
+ update the entry of a file which has been deleted.
+
+ oldstatus: char in statuskeys, status of the file before deletion
+ '''
+ if oldstatus == 'r':
+ newstatus = 'r'
+ elif oldstatus in 'almn':
+ newstatus = '!'
+ else:
+ newstatus = None
+
+ self.statcache.pop(wfn, None)
+ self._updatestatus(wfn, newstatus)
+
+ def _updatestatus(self, wfn, newstatus):
+ '''
+ Update the stored status of a file.
+
+ newstatus: - char in (statuskeys + 'ni'), new status to apply.
+ - or None, to stop tracking wfn
+ '''
+ root, fn = split(wfn)
+ d = self.tree.dir(root)
+
+ oldstatus = d.files.get(fn)
+ # oldstatus can be either:
+ # - None : fn is new
+ # - a char in statuskeys: fn is a (tracked) file
+
+ if self.ui.debugflag and oldstatus != newstatus:
+ self.ui.note(_('status: %r %s -> %s\n') %
+ (wfn, oldstatus, newstatus))
+
+ if oldstatus and oldstatus in self.statuskeys \
+ and oldstatus != newstatus:
+ del self.statustrees[oldstatus].dir(root).files[fn]
+
+ if newstatus in (None, 'i'):
+ d.files.pop(fn, None)
+ elif oldstatus != newstatus:
+ d.files[fn] = newstatus
+ if newstatus != 'n':
+ self.statustrees[newstatus].dir(root).files[fn] = newstatus
+
+ def check_deleted(self, key):
+ # Files that had been deleted but were present in the dirstate
+ # may have vanished from the dirstate; we must clean them up.
+ nuke = []
+ for wfn, ignore in self.statustrees[key].walk(key):
+ if wfn not in self.dirstate:
+ nuke.append(wfn)
+ for wfn in nuke:
+ root, fn = split(wfn)
+ del self.statustrees[key].dir(root).files[fn]
+ del self.tree.dir(root).files[fn]
+
+ def update_hgignore(self):
+ # An update of the ignore file can potentially change the
+ # states of all unknown and ignored files.
+
+ # XXX If the user has other ignore files outside the repo, or
+ # changes their list of ignore files at run time, we'll
+ # potentially never see changes to them. We could get the
+ # client to report to us what ignore data they're using.
+ # But it's easier to do nothing than to open that can of
+ # worms.
+
+ if '_ignore' in self.dirstate.__dict__:
+ delattr(self.dirstate, '_ignore')
+ self.ui.note(_('rescanning due to .hgignore change\n'))
+ self.handle_timeout()
+ self.scan()
+
+ def getstat(self, wpath):
+ try:
+ return self.statcache[wpath]
+ except KeyError:
+ try:
+ return self.stat(wpath)
+ except OSError, err:
+ if err.errno != errno.ENOENT:
+ raise
+
+ def stat(self, wpath):
+ try:
+ st = os.lstat(join(self.wprefix, wpath))
+ ret = st.st_mode, st.st_size, st.st_mtime
+ self.statcache[wpath] = ret
+ return ret
+ except OSError:
+ self.statcache.pop(wpath, None)
+ raise
+
+class socketlistener(object):
+ """
+ Listens for client queries on unix socket inotify.sock
+ """
+ def __init__(self, ui, root, repowatcher, timeout):
+ self.ui = ui
+ self.repowatcher = repowatcher
+ self.sock = socket.socket(socket.AF_UNIX)
+ self.sockpath = join(root, '.hg/inotify.sock')
+
+ self.realsockpath = self.sockpath
+ if os.path.islink(self.sockpath):
+ if os.path.exists(self.sockpath):
+ self.realsockpath = os.readlink(self.sockpath)
+ else:
+ raise util.Abort('inotify-server: cannot start: '
+ '.hg/inotify.sock is a broken symlink')
+ try:
+ self.sock.bind(self.realsockpath)
+ except socket.error, err:
+ if err.args[0] == errno.EADDRINUSE:
+ raise AlreadyStartedException(_('cannot start: socket is '
+ 'already bound'))
+ if err.args[0] == "AF_UNIX path too long":
+ tempdir = tempfile.mkdtemp(prefix="hg-inotify-")
+ self.realsockpath = os.path.join(tempdir, "inotify.sock")
+ try:
+ self.sock.bind(self.realsockpath)
+ os.symlink(self.realsockpath, self.sockpath)
+ except (OSError, socket.error), inst:
+ try:
+ os.unlink(self.realsockpath)
+ except OSError:
+ pass
+ os.rmdir(tempdir)
+ if inst.errno == errno.EEXIST:
+ raise AlreadyStartedException(_('cannot start: tried '
+ 'linking .hg/inotify.sock to a temporary socket but'
+ ' .hg/inotify.sock already exists'))
+ raise
+ else:
+ raise
+ self.sock.listen(5)
+ self.fileno = self.sock.fileno
+
+ def answer_stat_query(self, cs):
+ names = cs.read().split('\0')
+
+ states = names.pop()
+
+ self.ui.note(_('answering query for %r\n') % states)
+
+ visited = set()
+ if not names:
+ def genresult(states, tree):
+ for fn, state in tree.walk(states):
+ yield fn
+ else:
+ def genresult(states, tree):
+ for fn in names:
+ for f in tree.lookup(states, fn, visited):
+ yield f
+
+ return ['\0'.join(r) for r in [
+ genresult('l', self.repowatcher.statustrees['l']),
+ genresult('m', self.repowatcher.statustrees['m']),
+ genresult('a', self.repowatcher.statustrees['a']),
+ genresult('r', self.repowatcher.statustrees['r']),
+ genresult('!', self.repowatcher.statustrees['!']),
+ '?' in states
+ and genresult('?', self.repowatcher.statustrees['?'])
+ or [],
+ [],
+ 'c' in states and genresult('n', self.repowatcher.tree) or [],
+ visited
+ ]]
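+
+ # The nine lists above line up with the '>lllllllll' STAT response
+ # header in common.py: lookup, modified, added, removed, deleted (!),
+ # unknown (?), ignored (always empty here), clean, and the visited
+ # directories.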
+
+ def answer_dbug_query(self):
+ return ['\0'.join(self.repowatcher.debug())]
+
+ def accept_connection(self):
+ sock, addr = self.sock.accept()
+
+ cs = common.recvcs(sock)
+ version = ord(cs.read(1))
+
+ if version != common.version:
+ self.ui.warn(_('received query from incompatible client '
+ 'version %d\n') % version)
+ try:
+ # try to send back our version to the client
+ # this way, the client too is informed of the mismatch
+ sock.sendall(chr(common.version))
+ except socket.error:
+ pass
+ return
+
+ type = cs.read(4)
+
+ if type == 'STAT':
+ results = self.answer_stat_query(cs)
+ elif type == 'DBUG':
+ results = self.answer_dbug_query()
+ else:
+ self.ui.warn(_('unrecognized query type: %s\n') % type)
+ return
+
+ try:
+ try:
+ v = chr(common.version)
+
+ sock.sendall(v + type + struct.pack(common.resphdrfmts[type],
+ *map(len, results)))
+ sock.sendall(''.join(results))
+ finally:
+ sock.shutdown(socket.SHUT_WR)
+ except socket.error, err:
+ if err.args[0] != errno.EPIPE:
+ raise
+
+if sys.platform.startswith('linux'):
+ import linuxserver as _server
+else:
+ raise ImportError
+
+master = _server.master
+
+def start(ui, dirstate, root, opts):
+ timeout = opts.get('idle_timeout')
+ if timeout:
+ timeout = float(timeout) * 60000
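+ # the idle-timeout option is given in minutes, but poll() takes
+ # milliseconds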
+ else:
+ timeout = None
+
+ class service(object):
+ def init(self):
+ try:
+ self.master = master(ui, dirstate, root, timeout)
+ except AlreadyStartedException, inst:
+ raise util.Abort("inotify-server: %s" % inst)
+
+ def run(self):
+ try:
+ try:
+ self.master.run()
+ except TimeoutException:
+ pass
+ finally:
+ self.master.shutdown()
+
+ if 'inserve' not in sys.argv:
+ runargs = util.hgcmd() + ['inserve', '-R', root]
+ else:
+ runargs = util.hgcmd() + sys.argv[1:]
+
+ pidfile = ui.config('inotify', 'pidfile')
+ if opts['daemon'] and pidfile is not None and 'pid-file' not in runargs:
+ runargs.append("--pid-file=%s" % pidfile)
+
+ service = service()
+ logfile = ui.config('inotify', 'log')
+
+ appendpid = ui.configbool('inotify', 'appendpid', False)
+
+ ui.debug('starting inotify server: %s\n' % ' '.join(runargs))
+ cmdutil.service(opts, initfn=service.init, runfn=service.run,
+ logfile=logfile, runargs=runargs, appendpid=appendpid)
diff --git a/hgext/interhg.py b/hgext/interhg.py
new file mode 100644
index 0000000..a998a35
--- /dev/null
+++ b/hgext/interhg.py
@@ -0,0 +1,83 @@
+# interhg.py - interhg
+#
+# Copyright 2007 OHASHI Hideya <ohachige@gmail.com>
+#
+# Contributor(s):
+# Edward Lee <edward.lee@engineering.uiuc.edu>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''expand expressions into changelog and summaries
+
+This extension allows the use of a special syntax in summaries, which
+will be automatically expanded into links or any other arbitrary
+expression, much like InterWiki does.
+
+A few example patterns (link to bug tracking, etc.) that may be used
+in your hgrc::
+
+ [interhg]
+ issues = s!issue(\\d+)!<a href="http://bts/issue\\1">issue\\1</a>!
+ bugzilla = s!((?:bug|b=|(?=#?\\d{4,}))(?:\\s*#?)(\\d+))!<a..=\\2">\\1</a>!i
+ boldify = s!(^|\\s)#(\\d+)\\b! <b>#\\2</b>!
+'''
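+
+# Editor's sketch (hypothetical tracker URL): with the "issues" pattern
+# above, hgweb would render a changeset summary such as "fix issue123" as:
+#   fix <a href="http://bts/issue123">issue123</a>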
+
+import re
+from mercurial.hgweb import hgweb_mod
+from mercurial import templatefilters, extensions
+from mercurial.i18n import _
+
+testedwith = 'internal'
+
+interhg_table = []
+
+def uisetup(ui):
+ orig_escape = templatefilters.filters["escape"]
+
+ def interhg_escape(x):
+ escstr = orig_escape(x)
+ for regexp, format in interhg_table:
+ escstr = regexp.sub(format, escstr)
+ return escstr
+
+ templatefilters.filters["escape"] = interhg_escape
+
+def interhg_refresh(orig, self, *args, **kwargs):
+ interhg_table[:] = []
+ for key, pattern in self.repo.ui.configitems('interhg'):
+ # grab the delimiter from the character after the "s"
+ unesc = pattern[1]
+ delim = re.escape(unesc)
+
+ # identify portions of the pattern, taking care to avoid escaped
+ # delimiters. the replace format and flags are optional, but delimiters
+ # are required.
+ match = re.match(r'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
+ % (delim, delim, delim), pattern)
+ if not match:
+ self.repo.ui.warn(_("interhg: invalid pattern for %s: %s\n")
+ % (key, pattern))
+ continue
+
+ # we need to unescape the delimiter for regexp and format
+ delim_re = re.compile(r'(?<!\\)\\%s' % delim)
+ regexp = delim_re.sub(unesc, match.group(1))
+ format = delim_re.sub(unesc, match.group(2))
+
+ # the pattern allows for 6 regexp flags, so set them if necessary
+ flagin = match.group(3)
+ flags = 0
+ if flagin:
+ for flag in flagin.upper():
+ flags |= re.__dict__[flag]
+
+ try:
+ regexp = re.compile(regexp, flags)
+ interhg_table.append((regexp, format))
+ except re.error:
+ self.repo.ui.warn(_("interhg: invalid regexp for %s: %s\n")
+ % (key, regexp))
+ return orig(self, *args, **kwargs)
+
+extensions.wrapfunction(hgweb_mod.hgweb, 'refresh', interhg_refresh)
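+
+# Editor's note -- how a pattern is decomposed (hypothetical example):
+# for 'issues = s!issue(\d+)!<a href="http://bts/issue\1">issue\1</a>!i'
+# the delimiter is '!', match.group(1) is the regexp 'issue(\d+)',
+# group(2) is the replacement format, and the trailing 'i' in group(3)
+# sets re.I on the compiled regexp.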
diff --git a/hgext/keyword.py b/hgext/keyword.py
new file mode 100644
index 0000000..54bab17
--- /dev/null
+++ b/hgext/keyword.py
@@ -0,0 +1,730 @@
+# keyword.py - $Keyword$ expansion for Mercurial
+#
+# Copyright 2007-2012 Christian Ebert <blacktrash@gmx.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+#
+# $Id$
+#
+# Keyword expansion hack against the grain of a DSCM
+#
+# There are many good reasons why this is not needed in a distributed
+# SCM; still, it may be useful in very small projects based on single
+# files (like LaTeX packages) that are mostly addressed to an
+# audience not running a version control system.
+#
+# For in-depth discussion refer to
+# <http://mercurial.selenic.com/wiki/KeywordPlan>.
+#
+# Keyword expansion is based on Mercurial's changeset template mappings.
+#
+# Binary files are not touched.
+#
+# Files to act upon/ignore are specified in the [keyword] section.
+# Customized keyword template mappings in the [keywordmaps] section.
+#
+# Run "hg help keyword" and "hg kwdemo" to get info on configuration.
+
+'''expand keywords in tracked files
+
+This extension expands RCS/CVS-like or self-customized $Keywords$ in
+tracked text files selected by your configuration.
+
+Keywords are only expanded in local repositories and not stored in the
+change history. The mechanism can be regarded as a convenience for the
+current user or for archive distribution.
+
+Keywords expand to the changeset data pertaining to the latest change
+relative to the working directory parent of each file.
+
+Configuration is done in the [keyword], [keywordset] and [keywordmaps]
+sections of hgrc files.
+
+Example::
+
+ [keyword]
+ # expand keywords in every python file except those matching "x*"
+ **.py =
+ x* = ignore
+
+ [keywordset]
+ # prefer svn- over cvs-like default keywordmaps
+ svn = True
+
+.. note::
+ The more specific you are in your filename patterns, the less
+ speed you lose in huge repositories.
+
+For [keywordmaps] template mapping and expansion demonstration and
+control run :hg:`kwdemo`. See :hg:`help templates` for a list of
+available templates and filters.
+
+Three additional date template filters are provided:
+
+:``utcdate``: "2006/09/18 15:13:13"
+:``svnutcdate``: "2006-09-18 15:13:13Z"
+:``svnisodate``: "2006-09-18 08:13:13 -0700 (Mon, 18 Sep 2006)"
+
+The default template mappings (view with :hg:`kwdemo -d`) can be
+replaced with customized keywords and templates. Again, run
+:hg:`kwdemo` to control the results of your configuration changes.
+
+Before changing/disabling active keywords, you must run :hg:`kwshrink`
+to avoid storing expanded keywords in the change history.
+
+To force expansion after enabling it, or a configuration change, run
+:hg:`kwexpand`.
+
+Expansions spanning more than one line and incremental expansions,
+like CVS' $Log$, are not supported. A keyword template map "Log =
+{desc}" expands to the first line of the changeset description.
+'''
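+
+# Editor's sketch (hypothetical changeset data): with the default cvs-like
+# maps, a tracked file containing '$Id$' reads, after expansion, something
+# like '$Id: demo.txt,v a1b2c3d4e5f6 2009/08/18 11:00:13 alice $' in the
+# working directory, while the stored filelog keeps the bare '$Id$'.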
+
+from mercurial import commands, context, cmdutil, dispatch, filelog, extensions
+from mercurial import localrepo, match, patch, templatefilters, templater, util
+from mercurial import scmutil
+from mercurial.hgweb import webcommands
+from mercurial.i18n import _
+import os, re, shutil, tempfile
+
+commands.optionalrepo += ' kwdemo'
+
+cmdtable = {}
+command = cmdutil.command(cmdtable)
+testedwith = 'internal'
+
+# hg commands that do not act on keywords
+nokwcommands = ('add addremove annotate bundle export grep incoming init log'
+ ' outgoing push tip verify convert email glog')
+
+# hg commands that trigger expansion only when writing to working dir,
+# not when reading filelog, and unexpand when reading from working dir
+restricted = 'merge kwexpand kwshrink record qrecord resolve transplant'
+
+# names of extensions using dorecord
+recordextensions = 'record'
+
+colortable = {
+ 'kwfiles.enabled': 'green bold',
+ 'kwfiles.deleted': 'cyan bold underline',
+ 'kwfiles.enabledunknown': 'green',
+ 'kwfiles.ignored': 'bold',
+ 'kwfiles.ignoredunknown': 'none'
+}
+
+# date like in cvs' $Date
+def utcdate(text):
+ ''':utcdate: Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
+ '''
+ return util.datestr((text[0], 0), '%Y/%m/%d %H:%M:%S')
+# date like in svn's $Date
+def svnisodate(text):
+ ''':svnisodate: Date. Returns a date in this format: "2009-08-18 13:00:13
+ +0200 (Tue, 18 Aug 2009)".
+ '''
+ return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
+# date like in svn's $Id
+def svnutcdate(text):
+ ''':svnutcdate: Date. Returns a UTC-date in this format: "2009-08-18
+ 11:00:13Z".
+ '''
+ return util.datestr((text[0], 0), '%Y-%m-%d %H:%M:%SZ')
+
+templatefilters.filters.update({'utcdate': utcdate,
+ 'svnisodate': svnisodate,
+ 'svnutcdate': svnutcdate})
+
+# make keyword tools accessible
+kwtools = {'templater': None, 'hgcmd': ''}
+
+def _defaultkwmaps(ui):
+ '''Returns default keywordmaps according to keywordset configuration.'''
+ templates = {
+ 'Revision': '{node|short}',
+ 'Author': '{author|user}',
+ }
+ kwsets = ({
+ 'Date': '{date|utcdate}',
+ 'RCSfile': '{file|basename},v',
+ 'RCSFile': '{file|basename},v', # kept for backwards compatibility
+ # with hg-keyword
+ 'Source': '{root}/{file},v',
+ 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
+ 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
+ }, {
+ 'Date': '{date|svnisodate}',
+ 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
+ 'LastChangedRevision': '{node|short}',
+ 'LastChangedBy': '{author|user}',
+ 'LastChangedDate': '{date|svnisodate}',
+ })
+ templates.update(kwsets[ui.configbool('keywordset', 'svn')])
+ return templates
+
+def _shrinktext(text, subfunc):
+ '''Helper for keyword expansion removal in text.
+ Depending on subfunc also returns number of substitutions.'''
+ return subfunc(r'$\1$', text)
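+
+# Editor's sketch (assuming 'Id' is a configured keyword):
+# _shrinktext('$Id: demo.txt,v abc123 $', rekwexp.sub) yields '$Id$';
+# the .subn variant instead returns ('$Id$', 1), so callers such as
+# kwtemplater.overwrite() can tell whether anything was rewritten.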
+
+def _preselect(wstatus, changed):
+ '''Retrieves modified and added files from a working directory state
+ and returns the subset of each contained in given changed files
+ retrieved from a change context.'''
+ modified, added = wstatus[:2]
+ modified = [f for f in modified if f in changed]
+ added = [f for f in added if f in changed]
+ return modified, added
+
+
+class kwtemplater(object):
+ '''
+ Sets up keyword templates, corresponding keyword regex, and
+ provides keyword substitution functions.
+ '''
+
+ def __init__(self, ui, repo, inc, exc):
+ self.ui = ui
+ self.repo = repo
+ self.match = match.match(repo.root, '', [], inc, exc)
+ self.restrict = kwtools['hgcmd'] in restricted.split()
+ self.postcommit = False
+
+ kwmaps = self.ui.configitems('keywordmaps')
+ if kwmaps: # override default templates
+ self.templates = dict((k, templater.parsestring(v, False))
+ for k, v in kwmaps)
+ else:
+ self.templates = _defaultkwmaps(self.ui)
+
+ @util.propertycache
+ def escape(self):
+ '''Returns bar-separated and escaped keywords.'''
+ return '|'.join(map(re.escape, self.templates.keys()))
+
+ @util.propertycache
+ def rekw(self):
+ '''Returns regex for unexpanded keywords.'''
+ return re.compile(r'\$(%s)\$' % self.escape)
+
+ @util.propertycache
+ def rekwexp(self):
+ '''Returns regex for expanded keywords.'''
+ return re.compile(r'\$(%s): [^$\n\r]*? \$' % self.escape)
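+
+ # Editor's note (hypothetical templates): with keys 'Id' and
+ # 'Revision', escape is 'Id|Revision'; rekw matches the bare '$Id$'
+ # form, while rekwexp matches the expanded '$Id: ... $' form, the
+ # [^$\n\r] class keeping the payload on a single line.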
+
+ def substitute(self, data, path, ctx, subfunc):
+ '''Replaces keywords in data with expanded template.'''
+ def kwsub(mobj):
+ kw = mobj.group(1)
+ ct = cmdutil.changeset_templater(self.ui, self.repo,
+ False, None, '', False)
+ ct.use_template(self.templates[kw])
+ self.ui.pushbuffer()
+ ct.show(ctx, root=self.repo.root, file=path)
+ ekw = templatefilters.firstline(self.ui.popbuffer())
+ return '$%s: %s $' % (kw, ekw)
+ return subfunc(kwsub, data)
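+
+ # Editor's sketch (hypothetical node): kwsub rewrites a '$Revision$'
+ # match as '$Revision: a1b2c3d4e5f6 $', i.e. the mapped template is
+ # rendered through changeset_templater and only its first line kept.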
+
+ def linkctx(self, path, fileid):
+ '''Similar to filelog.linkrev, but returns a changectx.'''
+ return self.repo.filectx(path, fileid=fileid).changectx()
+
+ def expand(self, path, node, data):
+ '''Returns data with keywords expanded.'''
+ if not self.restrict and self.match(path) and not util.binary(data):
+ ctx = self.linkctx(path, node)
+ return self.substitute(data, path, ctx, self.rekw.sub)
+ return data
+
+ def iskwfile(self, cand, ctx):
+ '''Returns subset of candidates which are configured for keyword
+ expansion but are not symbolic links.'''
+ return [f for f in cand if self.match(f) and 'l' not in ctx.flags(f)]
+
+ def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
+ '''Overwrites selected files expanding/shrinking keywords.'''
+ if self.restrict or lookup or self.postcommit: # exclude kw_copy
+ candidates = self.iskwfile(candidates, ctx)
+ if not candidates:
+ return
+ kwcmd = self.restrict and lookup # kwexpand/kwshrink
+ if self.restrict or expand and lookup:
+ mf = ctx.manifest()
+ if self.restrict or rekw:
+ re_kw = self.rekw
+ else:
+ re_kw = self.rekwexp
+ if expand:
+ msg = _('overwriting %s expanding keywords\n')
+ else:
+ msg = _('overwriting %s shrinking keywords\n')
+ for f in candidates:
+ if self.restrict:
+ data = self.repo.file(f).read(mf[f])
+ else:
+ data = self.repo.wread(f)
+ if util.binary(data):
+ continue
+ if expand:
+ if lookup:
+ ctx = self.linkctx(f, mf[f])
+ data, found = self.substitute(data, f, ctx, re_kw.subn)
+ elif self.restrict:
+ found = re_kw.search(data)
+ else:
+ data, found = _shrinktext(data, re_kw.subn)
+ if found:
+ self.ui.note(msg % f)
+ fp = self.repo.wopener(f, "wb", atomictemp=True)
+ fp.write(data)
+ fp.close()
+ if kwcmd:
+ self.repo.dirstate.normal(f)
+ elif self.postcommit:
+ self.repo.dirstate.normallookup(f)
+
+ def shrink(self, fname, text):
+ '''Returns text with all keyword substitutions removed.'''
+ if self.match(fname) and not util.binary(text):
+ return _shrinktext(text, self.rekwexp.sub)
+ return text
+
+ def shrinklines(self, fname, lines):
+ '''Returns lines with keyword substitutions removed.'''
+ if self.match(fname):
+ text = ''.join(lines)
+ if not util.binary(text):
+ return _shrinktext(text, self.rekwexp.sub).splitlines(True)
+ return lines
+
+ def wread(self, fname, data):
+ '''If in restricted mode, returns data read from wdir with
+ keyword substitutions removed.'''
+ if self.restrict:
+ return self.shrink(fname, data)
+ return data
+
+class kwfilelog(filelog.filelog):
+ '''
+ Subclass of filelog to hook into its read, add, cmp methods.
+ Keywords are "stored" unexpanded, and processed on reading.
+ '''
+ def __init__(self, opener, kwt, path):
+ super(kwfilelog, self).__init__(opener, path)
+ self.kwt = kwt
+ self.path = path
+
+ def read(self, node):
+ '''Expands keywords when reading filelog.'''
+ data = super(kwfilelog, self).read(node)
+ if self.renamed(node):
+ return data
+ return self.kwt.expand(self.path, node, data)
+
+ def add(self, text, meta, tr, link, p1=None, p2=None):
+ '''Removes keyword substitutions when adding to filelog.'''
+ text = self.kwt.shrink(self.path, text)
+ return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
+
+ def cmp(self, node, text):
+ '''Removes keyword substitutions for comparison.'''
+ text = self.kwt.shrink(self.path, text)
+ return super(kwfilelog, self).cmp(node, text)
+
+def _status(ui, repo, wctx, kwt, *pats, **opts):
+ '''Bails out if [keyword] configuration is not active.
+ Returns status of working directory.'''
+ if kwt:
+ return repo.status(match=scmutil.match(wctx, pats, opts), clean=True,
+ unknown=opts.get('unknown') or opts.get('all'))
+ if ui.configitems('keyword'):
+ raise util.Abort(_('[keyword] patterns cannot match'))
+ raise util.Abort(_('no [keyword] patterns configured'))
+
+def _kwfwrite(ui, repo, expand, *pats, **opts):
+ '''Selects files and passes them to kwtemplater.overwrite.'''
+ wctx = repo[None]
+ if len(wctx.parents()) > 1:
+ raise util.Abort(_('outstanding uncommitted merge'))
+ kwt = kwtools['templater']
+ wlock = repo.wlock()
+ try:
+ status = _status(ui, repo, wctx, kwt, *pats, **opts)
+ modified, added, removed, deleted, unknown, ignored, clean = status
+ if modified or added or removed or deleted:
+ raise util.Abort(_('outstanding uncommitted changes'))
+ kwt.overwrite(wctx, clean, True, expand)
+ finally:
+ wlock.release()
+
+@command('kwdemo',
+ [('d', 'default', None, _('show default keyword template maps')),
+ ('f', 'rcfile', '',
+ _('read maps from rcfile'), _('FILE'))],
+ _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'))
+def demo(ui, repo, *args, **opts):
+ '''print [keywordmaps] configuration and an expansion example
+
+ Show current, custom, or default keyword template maps and their
+ expansions.
+
+ Extend the current configuration by specifying maps as arguments
+ and using -f/--rcfile to source an external hgrc file.
+
+ Use -d/--default to disable current configuration.
+
+ See :hg:`help templates` for information on templates and filters.
+ '''
+ def demoitems(section, items):
+ ui.write('[%s]\n' % section)
+ for k, v in sorted(items):
+ ui.write('%s = %s\n' % (k, v))
+
+ fn = 'demo.txt'
+ tmpdir = tempfile.mkdtemp('', 'kwdemo.')
+ ui.note(_('creating temporary repository at %s\n') % tmpdir)
+ repo = localrepo.localrepository(ui, tmpdir, True)
+ ui.setconfig('keyword', fn, '')
+ svn = ui.configbool('keywordset', 'svn')
+ # explicitly set keywordset for demo output
+ ui.setconfig('keywordset', 'svn', svn)
+
+ uikwmaps = ui.configitems('keywordmaps')
+ if args or opts.get('rcfile'):
+ ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
+ if uikwmaps:
+ ui.status(_('\textending current template maps\n'))
+ if opts.get('default') or not uikwmaps:
+ if svn:
+ ui.status(_('\toverriding default svn keywordset\n'))
+ else:
+ ui.status(_('\toverriding default cvs keywordset\n'))
+ if opts.get('rcfile'):
+ ui.readconfig(opts.get('rcfile'))
+ if args:
+ # simulate hgrc parsing
+ rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
+ fp = repo.opener('hgrc', 'w')
+ fp.writelines(rcmaps)
+ fp.close()
+ ui.readconfig(repo.join('hgrc'))
+ kwmaps = dict(ui.configitems('keywordmaps'))
+ elif opts.get('default'):
+ if svn:
+ ui.status(_('\n\tconfiguration using default svn keywordset\n'))
+ else:
+ ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
+ kwmaps = _defaultkwmaps(ui)
+ if uikwmaps:
+ ui.status(_('\tdisabling current template maps\n'))
+ for k, v in kwmaps.iteritems():
+ ui.setconfig('keywordmaps', k, v)
+ else:
+ ui.status(_('\n\tconfiguration using current keyword template maps\n'))
+ if uikwmaps:
+ kwmaps = dict(uikwmaps)
+ else:
+ kwmaps = _defaultkwmaps(ui)
+
+ uisetup(ui)
+ reposetup(ui, repo)
+ ui.write('[extensions]\nkeyword =\n')
+ demoitems('keyword', ui.configitems('keyword'))
+ demoitems('keywordset', ui.configitems('keywordset'))
+ demoitems('keywordmaps', kwmaps.iteritems())
+ keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
+ repo.wopener.write(fn, keywords)
+ repo[None].add([fn])
+ ui.note(_('\nkeywords written to %s:\n') % fn)
+ ui.note(keywords)
+ repo.dirstate.setbranch('demobranch')
+ for name, cmd in ui.configitems('hooks'):
+ if name.split('.', 1)[0].find('commit') > -1:
+ repo.ui.setconfig('hooks', name, '')
+ msg = _('hg keyword configuration and expansion example')
+ ui.note("hg ci -m '%s'\n" % msg) # check-code-ignore
+ repo.commit(text=msg)
+ ui.status(_('\n\tkeywords expanded\n'))
+ ui.write(repo.wread(fn))
+ shutil.rmtree(tmpdir, ignore_errors=True)
+
+@command('kwexpand', commands.walkopts, _('hg kwexpand [OPTION]... [FILE]...'))
+def expand(ui, repo, *pats, **opts):
+ '''expand keywords in the working directory
+
+ Run after (re)enabling keyword expansion.
+
+ kwexpand refuses to run if given files contain local changes.
+ '''
+ # 3rd argument sets expansion to True
+ _kwfwrite(ui, repo, True, *pats, **opts)
+
+@command('kwfiles',
+ [('A', 'all', None, _('show keyword status flags of all files')),
+ ('i', 'ignore', None, _('show files excluded from expansion')),
+ ('u', 'unknown', None, _('only show unknown (not tracked) files')),
+ ] + commands.walkopts,
+ _('hg kwfiles [OPTION]... [FILE]...'))
+def files(ui, repo, *pats, **opts):
+ '''show files configured for keyword expansion
+
+ List which files in the working directory are matched by the
+ [keyword] configuration patterns.
+
+ Useful to prevent inadvertent keyword expansion and to speed up
+ execution by including only files that are actual candidates for
+ expansion.
+
+ See :hg:`help keyword` on how to construct patterns both for
+ inclusion and exclusion of files.
+
+ With -A/--all and -v/--verbose the codes used to show the status
+ of files are::
+
+ K = keyword expansion candidate
+ k = keyword expansion candidate (not tracked)
+ I = ignored
+ i = ignored (not tracked)
+ '''
+ kwt = kwtools['templater']
+ wctx = repo[None]
+ status = _status(ui, repo, wctx, kwt, *pats, **opts)
+ cwd = pats and repo.getcwd() or ''
+ modified, added, removed, deleted, unknown, ignored, clean = status
+ files = []
+ if not opts.get('unknown') or opts.get('all'):
+ files = sorted(modified + added + clean)
+ kwfiles = kwt.iskwfile(files, wctx)
+ kwdeleted = kwt.iskwfile(deleted, wctx)
+ kwunknown = kwt.iskwfile(unknown, wctx)
+ if not opts.get('ignore') or opts.get('all'):
+ showfiles = kwfiles, kwdeleted, kwunknown
+ else:
+ showfiles = [], [], []
+ if opts.get('all') or opts.get('ignore'):
+ showfiles += ([f for f in files if f not in kwfiles],
+ [f for f in unknown if f not in kwunknown])
+ kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
+ kwstates = zip(kwlabels, 'K!kIi', showfiles)
+ fm = ui.formatter('kwfiles', opts)
+ fmt = '%.0s%s\n'
+ if opts.get('all') or ui.verbose:
+ fmt = '%s %s\n'
+ for kwstate, char, filenames in kwstates:
+ label = 'kwfiles.' + kwstate
+ for f in filenames:
+ fm.startitem()
+ fm.write('kwstatus path', fmt, char,
+ repo.pathto(f, cwd), label=label)
+ fm.end()
+
+@command('kwshrink', commands.walkopts, _('hg kwshrink [OPTION]... [FILE]...'))
+def shrink(ui, repo, *pats, **opts):
+ '''revert expanded keywords in the working directory
+
+ Must be run before changing/disabling active keywords.
+
+ kwshrink refuses to run if given files contain local changes.
+ '''
+ # 3rd argument sets expansion to False
+ _kwfwrite(ui, repo, False, *pats, **opts)
+
+
+def uisetup(ui):
+ ''' Monkeypatches dispatch._parse to retrieve user command.'''
+
+ def kwdispatch_parse(orig, ui, args):
+ '''Monkeypatch dispatch._parse to obtain running hg command.'''
+ cmd, func, args, options, cmdoptions = orig(ui, args)
+ kwtools['hgcmd'] = cmd
+ return cmd, func, args, options, cmdoptions
+
+ extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
+
+def reposetup(ui, repo):
+ '''Sets up repo as kwrepo for keyword substitution.
+ Overrides file method to return kwfilelog instead of filelog
+ if file matches user configuration.
+ Wraps commit to overwrite configured files with updated
+ keyword substitutions.
+ Monkeypatches patch and webcommands.'''
+
+ try:
+ if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
+ or '.hg' in util.splitpath(repo.root)
+ or repo._url.startswith('bundle:')):
+ return
+ except AttributeError:
+ pass
+
+ inc, exc = [], ['.hg*']
+ for pat, opt in ui.configitems('keyword'):
+ if opt != 'ignore':
+ inc.append(pat)
+ else:
+ exc.append(pat)
+ if not inc:
+ return
+
+ kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)
+
+ class kwrepo(repo.__class__):
+ def file(self, f):
+ if f[0] == '/':
+ f = f[1:]
+ return kwfilelog(self.sopener, kwt, f)
+
+ def wread(self, filename):
+ data = super(kwrepo, self).wread(filename)
+ return kwt.wread(filename, data)
+
+ def commit(self, *args, **opts):
+ # use custom commitctx for user commands
+ # other extensions can still wrap repo.commitctx directly
+ self.commitctx = self.kwcommitctx
+ try:
+ return super(kwrepo, self).commit(*args, **opts)
+ finally:
+ del self.commitctx
+
+ def kwcommitctx(self, ctx, error=False):
+ n = super(kwrepo, self).commitctx(ctx, error)
+ # no lock needed, only called from repo.commit() which already locks
+ if not kwt.postcommit:
+ restrict = kwt.restrict
+ kwt.restrict = True
+ kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
+ False, True)
+ kwt.restrict = restrict
+ return n
+
+ def rollback(self, dryrun=False, force=False):
+ wlock = self.wlock()
+ try:
+ if not dryrun:
+ changed = self['.'].files()
+ ret = super(kwrepo, self).rollback(dryrun, force)
+ if not dryrun:
+ ctx = self['.']
+ modified, added = _preselect(self[None].status(), changed)
+ kwt.overwrite(ctx, modified, True, True)
+ kwt.overwrite(ctx, added, True, False)
+ return ret
+ finally:
+ wlock.release()
+
+ # monkeypatches
+ def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
+ '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
+ rejects or conflicts due to expanded keywords in working dir.'''
+ orig(self, ui, gp, backend, store, eolmode)
+ # shrink keywords read from working dir
+ self.lines = kwt.shrinklines(self.fname, self.lines)
+
+ def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None,
+ opts=None, prefix=''):
+ '''Monkeypatch patch.diff to avoid expansion.'''
+ kwt.restrict = True
+ return orig(repo, node1, node2, match, changes, opts, prefix)
+
+ def kwweb_skip(orig, web, req, tmpl):
+ '''Wraps webcommands.x turning off keyword expansion.'''
+ kwt.match = util.never
+ return orig(web, req, tmpl)
+
+ def kw_amend(orig, ui, repo, commitfunc, old, extra, pats, opts):
+ '''Wraps cmdutil.amend expanding keywords after amend.'''
+ wlock = repo.wlock()
+ try:
+ kwt.postcommit = True
+ newid = orig(ui, repo, commitfunc, old, extra, pats, opts)
+ if newid != old.node():
+ ctx = repo[newid]
+ kwt.restrict = True
+ kwt.overwrite(ctx, ctx.files(), False, True)
+ kwt.restrict = False
+ return newid
+ finally:
+ wlock.release()
+
+ def kw_copy(orig, ui, repo, pats, opts, rename=False):
+ '''Wraps cmdutil.copy so that copy/rename destinations do not
+ contain expanded keywords.
+ Note that the source of a regular file destination may also be a
+ symlink:
+ hg cp sym x -> x is symlink
+ cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
+ For the latter we have to follow the symlink to find out whether its
+ target is configured for expansion and we therefore must unexpand the
+ keywords in the destination.'''
+ wlock = repo.wlock()
+ try:
+ orig(ui, repo, pats, opts, rename)
+ if opts.get('dry_run'):
+ return
+ wctx = repo[None]
+ cwd = repo.getcwd()
+
+ def haskwsource(dest):
+ '''Returns true if dest is a regular file and configured for
+ expansion or a symlink which points to a file configured for
+ expansion. '''
+ source = repo.dirstate.copied(dest)
+ if 'l' in wctx.flags(source):
+ source = scmutil.canonpath(repo.root, cwd,
+ os.path.realpath(source))
+ return kwt.match(source)
+
+ candidates = [f for f in repo.dirstate.copies() if
+ 'l' not in wctx.flags(f) and haskwsource(f)]
+ kwt.overwrite(wctx, candidates, False, False)
+ finally:
+ wlock.release()
+
+ def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
+ '''Wraps record.dorecord expanding keywords after recording.'''
+ wlock = repo.wlock()
+ try:
+ # record returns 0 even when nothing has changed
+ # therefore compare nodes before and after
+ kwt.postcommit = True
+ ctx = repo['.']
+ wstatus = repo[None].status()
+ ret = orig(ui, repo, commitfunc, *pats, **opts)
+ recctx = repo['.']
+ if ctx != recctx:
+ modified, added = _preselect(wstatus, recctx.files())
+ kwt.restrict = False
+ kwt.overwrite(recctx, modified, False, True)
+ kwt.overwrite(recctx, added, False, True, True)
+ kwt.restrict = True
+ return ret
+ finally:
+ wlock.release()
+
+ def kwfilectx_cmp(orig, self, fctx):
+ # keyword affects data size, comparing wdir and filelog size does
+ # not make sense
+ if (fctx._filerev is None and
+ (self._repo._encodefilterpats or
+ kwt.match(fctx.path()) and 'l' not in fctx.flags() or
+ self.size() - 4 == fctx.size()) or
+ self.size() == fctx.size()):
+ return self._filelog.cmp(self._filenode, fctx.data())
+ return True
+
+ extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
+ extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
+ extensions.wrapfunction(patch, 'diff', kw_diff)
+ extensions.wrapfunction(cmdutil, 'amend', kw_amend)
+ extensions.wrapfunction(cmdutil, 'copy', kw_copy)
+ for c in 'annotate changeset rev filediff diff'.split():
+ extensions.wrapfunction(webcommands, c, kwweb_skip)
+ for name in recordextensions.split():
+ try:
+ record = extensions.find(name)
+ extensions.wrapfunction(record, 'dorecord', kw_dorecord)
+ except KeyError:
+ pass
+
+ repo.__class__ = kwrepo
diff --git a/hgext/largefiles/CONTRIBUTORS b/hgext/largefiles/CONTRIBUTORS
new file mode 100644
index 0000000..9bef457
--- /dev/null
+++ b/hgext/largefiles/CONTRIBUTORS
@@ -0,0 +1,4 @@
+Greg Ward, author of the original bfiles extension
+Na'Tosha Bard of Unity Technologies
+Fog Creek Software
+Special thanks to the University of Toronto and the UCOSP program
diff --git a/hgext/largefiles/__init__.py b/hgext/largefiles/__init__.py
new file mode 100644
index 0000000..12c80fa
--- /dev/null
+++ b/hgext/largefiles/__init__.py
@@ -0,0 +1,102 @@
+# Copyright 2009-2010 Gregory P. Ward
+# Copyright 2009-2010 Intelerad Medical Systems Incorporated
+# Copyright 2010-2011 Fog Creek Software
+# Copyright 2010-2011 Unity Technologies
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''track large binary files
+
+Large binary files tend to be not very compressible, not very
+diffable, and not at all mergeable. Such files are not handled
+efficiently by Mercurial's storage format (revlog), which is based on
+compressed binary deltas; storing large binary files as regular
+Mercurial files wastes bandwidth and disk space and increases
+Mercurial's memory usage. The largefiles extension addresses these
+problems by adding a centralized client-server layer on top of
+Mercurial: largefiles live in a *central store* out on the network
+somewhere, and you only fetch the revisions that you need when you
+need them.
+
+largefiles works by maintaining a "standin file" in .hglf/ for each
+largefile. The standins are small (41 bytes: an SHA-1 hash plus
+newline) and are tracked by Mercurial. Largefile revisions are
+identified by the SHA-1 hash of their contents, which is written to
+the standin. largefiles uses that revision ID to get/put largefile
+revisions from/to the central store. This saves both disk space and
+bandwidth, since you don't need to retrieve all historical revisions
+of large files when you clone or pull.
+
+To start a new repository or add new large binary files, just add
+--large to your :hg:`add` command. For example::
+
+ $ dd if=/dev/urandom of=randomdata count=2000
+ $ hg add --large randomdata
+ $ hg commit -m 'add randomdata as a largefile'
+
+When you push a changeset that adds/modifies largefiles to a remote
+repository, its largefile revisions will be uploaded along with it.
+Note that the remote Mercurial must also have the largefiles extension
+enabled for this to work.
+
+When you pull a changeset that affects largefiles from a remote
+repository, Mercurial behaves as normal. However, when you update to
+such a revision, any largefiles needed by that revision are downloaded
+and cached (if they have never been downloaded before). This means
+that network access may be required to update to changesets you have
+not previously updated to.
+
+If you already have large files tracked by Mercurial without the
+largefiles extension, you will need to convert your repository in
+order to benefit from largefiles. This is done with the
+:hg:`lfconvert` command::
+
+ $ hg lfconvert --size 10 oldrepo newrepo
+
+In repositories that already have largefiles in them, any new file
+over 10MB will automatically be added as a largefile. To change this
+threshold, set ``largefiles.minsize`` in your Mercurial config file
+to the minimum size in megabytes to track as a largefile, or use the
+--lfsize option to the add command (also in megabytes)::
+
+ [largefiles]
+ minsize = 2
+
+ $ hg add --lfsize 2
+
+The ``largefiles.patterns`` config option allows you to specify a list
+of filename patterns (see :hg:`help patterns`) that should always be
+tracked as largefiles::
+
+ [largefiles]
+ patterns =
+ *.jpg
+ re:.*\.(png|bmp)$
+ library.zip
+ content/audio/*
+
+Files that match one of these patterns will be added as largefiles
+regardless of their size.
+
+The ``largefiles.minsize`` and ``largefiles.patterns`` config options
+will be ignored for any repositories not already containing a
+largefile. To add the first largefile to a repository, you must
+explicitly do so with the --large flag passed to the :hg:`add`
+command.
+'''
+
+from mercurial import commands
+
+import lfcommands
+import reposetup
+import uisetup
+
+testedwith = 'internal'
+
+reposetup = reposetup.reposetup
+uisetup = uisetup.uisetup
+
+commands.norepo += " lfconvert"
+
+cmdtable = lfcommands.cmdtable
diff --git a/hgext/largefiles/basestore.py b/hgext/largefiles/basestore.py
new file mode 100644
index 0000000..55aa4a0
--- /dev/null
+++ b/hgext/largefiles/basestore.py
@@ -0,0 +1,195 @@
+# Copyright 2009-2010 Gregory P. Ward
+# Copyright 2009-2010 Intelerad Medical Systems Incorporated
+# Copyright 2010-2011 Fog Creek Software
+# Copyright 2010-2011 Unity Technologies
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''base class for store implementations and store-related utility code'''
+
+import binascii
+import re
+
+from mercurial import util, node, hg
+from mercurial.i18n import _
+
+import lfutil
+
+class StoreError(Exception):
+ '''Raised when there is a problem getting files from or putting
+ files to a central store.'''
+ def __init__(self, filename, hash, url, detail):
+ self.filename = filename
+ self.hash = hash
+ self.url = url
+ self.detail = detail
+
+ def longmessage(self):
+ if self.url:
+ return ('%s: %s\n'
+ '(failed URL: %s)\n'
+ % (self.filename, self.detail, self.url))
+ else:
+ return ('%s: %s\n'
+ '(no default or default-push path set in hgrc)\n'
+ % (self.filename, self.detail))
+
+ def __str__(self):
+ return "%s: %s" % (self.url, self.detail)
+
+class basestore(object):
+ def __init__(self, ui, repo, url):
+ self.ui = ui
+ self.repo = repo
+ self.url = url
+
+ def put(self, source, hash):
+ '''Put source file into the store under <filename>/<hash>.'''
+ raise NotImplementedError('abstract method')
+
+ def exists(self, hashes):
+ '''Check to see if the store contains the given hashes.'''
+ raise NotImplementedError('abstract method')
+
+ def get(self, files):
+ '''Get the specified largefiles from the store and write to local
+ files under repo.root. files is a list of (filename, hash)
+ tuples. Return (success, missing), lists of files successfully
+ downloaded and those not found in the store. success is a list
+ of (filename, hash) tuples; missing is a list of filenames that
+ we could not get. (The detailed error message will already have
+ been presented to the user, so missing is just supplied as a
+ summary.)'''
+ success = []
+ missing = []
+ ui = self.ui
+
+ at = 0
+ for filename, hash in files:
+ ui.progress(_('getting largefiles'), at, unit='lfile',
+ total=len(files))
+ at += 1
+ ui.note(_('getting %s:%s\n') % (filename, hash))
+
+ storefilename = lfutil.storepath(self.repo, hash)
+ tmpfile = util.atomictempfile(storefilename,
+ createmode=self.repo.store.createmode)
+
+ try:
+ hhash = binascii.hexlify(self._getfile(tmpfile, filename, hash))
+ except StoreError, err:
+ ui.warn(err.longmessage())
+ hhash = ""
+
+ if hhash != hash:
+ if hhash != "":
+ ui.warn(_('%s: data corruption (expected %s, got %s)\n')
+ % (filename, hash, hhash))
+ tmpfile.discard() # no-op if it's already closed
+ missing.append(filename)
+ continue
+
+ tmpfile.close()
+ lfutil.linktousercache(self.repo, hash)
+ success.append((filename, hhash))
+
+ ui.progress(_('getting largefiles'), None)
+ return (success, missing)
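+
+ # Editor's sketch (hypothetical 40-hex hash H): get([('big.dat', H)])
+ # writes the revision into the local store path for H and returns
+ # ([('big.dat', H)], []); on a hash mismatch or download error it
+ # returns ([], ['big.dat']) after discarding the temporary file.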
+
+ def verify(self, revs, contents=False):
+ '''Verify the existence (and, optionally, contents) of every big
+ file revision referenced by every changeset in revs.
+ Return 0 if all is well, non-zero on any errors.'''
+ write = self.ui.write
+ failed = False
+
+ write(_('searching %d changesets for largefiles\n') % len(revs))
+ verified = set() # set of (filename, filenode) tuples
+
+ for rev in revs:
+ cctx = self.repo[rev]
+ cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))
+
+ # accumulate failures across changesets; 'or failed' keeps
+ # errors from earlier revisions from being overwritten
+ failed = util.any(self._verifyfile(
+ cctx, cset, contents, standin, verified)
+ for standin in cctx) or failed
+
+ numrevs = len(verified)
+ numlfiles = len(set([fname for (fname, fnode) in verified]))
+ if contents:
+ write(_('verified contents of %d revisions of %d largefiles\n')
+ % (numrevs, numlfiles))
+ else:
+ write(_('verified existence of %d revisions of %d largefiles\n')
+ % (numrevs, numlfiles))
+
+ return int(failed)
+
+ def _getfile(self, tmpfile, filename, hash):
+ '''Fetch one revision of one file from the store and write it
+ to tmpfile. Compute the hash of the file on-the-fly as it
+ downloads and return the binary hash. Close tmpfile. Raise
+ StoreError if unable to download the file (e.g. it does not
+ exist in the store).'''
+ raise NotImplementedError('abstract method')
+
+ def _verifyfile(self, cctx, cset, contents, standin, verified):
+ '''Perform the actual verification of a file in the store.
+ '''
+ raise NotImplementedError('abstract method')
+
+import localstore, wirestore
+
+_storeprovider = {
+ 'file': [localstore.localstore],
+ 'http': [wirestore.wirestore],
+ 'https': [wirestore.wirestore],
+ 'ssh': [wirestore.wirestore],
+ }
+
+_scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
+
+# During clone this function is passed the src's ui object
+# but it needs the dest's ui object so it can read out of
+# the config file. Use repo.ui instead.
+def _openstore(repo, remote=None, put=False):
+ ui = repo.ui
+
+ if not remote:
+ lfpullsource = getattr(repo, 'lfpullsource', None)
+ if lfpullsource:
+ path = ui.expandpath(lfpullsource)
+ else:
+ path = ui.expandpath('default-push', 'default')
+
+ # ui.expandpath() leaves 'default-push' and 'default' alone if
+ # they cannot be expanded: fallback to the empty string,
+ # meaning the current directory.
+ if path == 'default-push' or path == 'default':
+ path = ''
+ remote = repo
+ else:
+ remote = hg.peer(repo, {}, path)
+
+ # The path could be a scheme so use Mercurial's normal functionality
+ # to resolve the scheme to a repository and use its path
+ path = util.safehasattr(remote, 'url') and remote.url() or remote.path
+
+ match = _scheme_re.match(path)
+ if not match: # regular filesystem path
+ scheme = 'file'
+ else:
+ scheme = match.group(1)
+
+ try:
+ storeproviders = _storeprovider[scheme]
+ except KeyError:
+ raise util.Abort(_('unsupported URL scheme %r') % scheme)
+
+ for classobj in storeproviders:
+ try:
+ return classobj(ui, repo, remote)
+ except lfutil.storeprotonotcapable:
+ pass
+
+ raise util.Abort(_('%s does not appear to be a largefile store') % path)
diff --git a/hgext/largefiles/lfcommands.py b/hgext/largefiles/lfcommands.py
new file mode 100644
index 0000000..de42edd
--- /dev/null
+++ b/hgext/largefiles/lfcommands.py
@@ -0,0 +1,549 @@
+# Copyright 2009-2010 Gregory P. Ward
+# Copyright 2009-2010 Intelerad Medical Systems Incorporated
+# Copyright 2010-2011 Fog Creek Software
+# Copyright 2010-2011 Unity Technologies
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''High-level command function for lfconvert, plus the cmdtable.'''
+
+import os
+import shutil
+
+from mercurial import util, match as match_, hg, node, context, error, \
+ cmdutil, scmutil
+from mercurial.i18n import _
+from mercurial.lock import release
+
+import lfutil
+import basestore
+
+# -- Commands ----------------------------------------------------------
+
+def lfconvert(ui, src, dest, *pats, **opts):
+ '''convert a normal repository to a largefiles repository
+
+ Convert repository SOURCE to a new repository DEST, identical to
+ SOURCE except that certain files will be converted as largefiles:
+ specifically, any file that matches any PATTERN *or* whose size is
+ above the minimum size threshold is converted as a largefile. The
+ size used to determine whether or not to track a file as a
+ largefile is the size of the first version of the file. The
+ minimum size can be specified either with --size or in
+ configuration as ``largefiles.minsize``.
+
+ After running this command you will need to make sure that
+ largefiles is enabled anywhere you intend to push the new
+ repository.
+
+ Use --to-normal to convert largefiles back to normal files; after
+ this, the DEST repository can be used without largefiles at all.'''
+
+ if opts['to_normal']:
+ tolfile = False
+ else:
+ tolfile = True
+ size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
+
+ if not hg.islocal(src):
+ raise util.Abort(_('%s is not a local Mercurial repo') % src)
+ if not hg.islocal(dest):
+ raise util.Abort(_('%s is not a local Mercurial repo') % dest)
+
+ rsrc = hg.repository(ui, src)
+ ui.status(_('initializing destination %s\n') % dest)
+ rdst = hg.repository(ui, dest, create=True)
+
+ success = False
+ dstwlock = dstlock = None
+ try:
+ # Lock destination to prevent modification while it is converted to.
+ # Don't need to lock src because we are just reading from its history
+ # which can't change.
+ dstwlock = rdst.wlock()
+ dstlock = rdst.lock()
+
+ # Get a list of all changesets in the source. The easy way to do this
+ # is to simply walk the changelog, using changelog.nodesbetween().
+ # Take a look at mercurial/revlog.py:639 for more details.
+ # Use a generator instead of a list to decrease memory usage
+ ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
+ rsrc.heads())[0])
+ revmap = {node.nullid: node.nullid}
+ if tolfile:
+ lfiles = set()
+ normalfiles = set()
+ if not pats:
+ pats = ui.configlist(lfutil.longname, 'patterns', default=[])
+ if pats:
+ matcher = match_.match(rsrc.root, '', list(pats))
+ else:
+ matcher = None
+
+ lfiletohash = {}
+ for ctx in ctxs:
+ ui.progress(_('converting revisions'), ctx.rev(),
+ unit=_('revision'), total=rsrc['tip'].rev())
+ _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
+ lfiles, normalfiles, matcher, size, lfiletohash)
+ ui.progress(_('converting revisions'), None)
+
+ if os.path.exists(rdst.wjoin(lfutil.shortname)):
+ shutil.rmtree(rdst.wjoin(lfutil.shortname))
+
+ for f in lfiletohash.keys():
+ if os.path.isfile(rdst.wjoin(f)):
+ os.unlink(rdst.wjoin(f))
+ try:
+ os.removedirs(os.path.dirname(rdst.wjoin(f)))
+ except OSError:
+ pass
+
+ # If there were any files converted to largefiles, add largefiles
+ # to the destination repository's requirements.
+ if lfiles:
+ rdst.requirements.add('largefiles')
+ rdst._writerequirements()
+ else:
+ for ctx in ctxs:
+ ui.progress(_('converting revisions'), ctx.rev(),
+ unit=_('revision'), total=rsrc['tip'].rev())
+ _addchangeset(ui, rsrc, rdst, ctx, revmap)
+
+ ui.progress(_('converting revisions'), None)
+ success = True
+ finally:
+ rdst.dirstate.clear()
+ release(dstlock, dstwlock)
+ if not success:
+ # we failed, remove the new directory
+ shutil.rmtree(rdst.root)
+
+def _addchangeset(ui, rsrc, rdst, ctx, revmap):
+ # Convert src parents to dst parents
+ parents = _convertparents(ctx, revmap)
+
+ # Generate list of changed files
+ files = _getchangedfiles(ctx, parents)
+
+ def getfilectx(repo, memctx, f):
+ if lfutil.standin(f) in files:
+ # if the file isn't in the manifest then it was removed
+ # or renamed, raise IOError to indicate this
+ try:
+ fctx = ctx.filectx(lfutil.standin(f))
+ except error.LookupError:
+ raise IOError
+ renamed = fctx.renamed()
+ if renamed:
+ renamed = lfutil.splitstandin(renamed[0])
+
+ hash = fctx.data().strip()
+ path = lfutil.findfile(rsrc, hash)
+ ### TODO: What if the file is not cached?
+ data = ''
+ fd = None
+ try:
+ fd = open(path, 'rb')
+ data = fd.read()
+ finally:
+ if fd:
+ fd.close()
+ return context.memfilectx(f, data, 'l' in fctx.flags(),
+ 'x' in fctx.flags(), renamed)
+ else:
+ return _getnormalcontext(repo.ui, ctx, f, revmap)
+
+ dstfiles = []
+ for file in files:
+ if lfutil.isstandin(file):
+ dstfiles.append(lfutil.splitstandin(file))
+ else:
+ dstfiles.append(file)
+ # Commit
+ _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
+
+def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
+ matcher, size, lfiletohash):
+ # Convert src parents to dst parents
+ parents = _convertparents(ctx, revmap)
+
+ # Generate list of changed files
+ files = _getchangedfiles(ctx, parents)
+
+ dstfiles = []
+ for f in files:
+ if f not in lfiles and f not in normalfiles:
+ islfile = _islfile(f, ctx, matcher, size)
+ # If this file was renamed or copied then copy
+ # the lfileness of its predecessor
+ if f in ctx.manifest():
+ fctx = ctx.filectx(f)
+ renamed = fctx.renamed()
+ renamedlfile = renamed and renamed[0] in lfiles
+ islfile |= renamedlfile
+ if 'l' in fctx.flags():
+ if renamedlfile:
+ raise util.Abort(
+ _('renamed/copied largefile %s becomes symlink')
+ % f)
+ islfile = False
+ if islfile:
+ lfiles.add(f)
+ else:
+ normalfiles.add(f)
+
+ if f in lfiles:
+ dstfiles.append(lfutil.standin(f))
+ # largefile in manifest if it has not been removed/renamed
+ if f in ctx.manifest():
+ fctx = ctx.filectx(f)
+ if 'l' in fctx.flags():
+ renamed = fctx.renamed()
+ if renamed and renamed[0] in lfiles:
+ raise util.Abort(_('largefile %s becomes symlink') % f)
+
+ # largefile was modified, update standins
+ fullpath = rdst.wjoin(f)
+ util.makedirs(os.path.dirname(fullpath))
+ m = util.sha1('')
+ m.update(ctx[f].data())
+ hash = m.hexdigest()
+ if f not in lfiletohash or lfiletohash[f] != hash:
+ # initialize fd first so the finally clause cannot hit an
+ # unbound name if open() itself raises
+ fd = None
+ try:
+ fd = open(fullpath, 'wb')
+ fd.write(ctx[f].data())
+ finally:
+ if fd:
+ fd.close()
+ executable = 'x' in ctx[f].flags()
+ os.chmod(fullpath, lfutil.getmode(executable))
+ lfutil.writestandin(rdst, lfutil.standin(f), hash,
+ executable)
+ lfiletohash[f] = hash
+ else:
+ # normal file
+ dstfiles.append(f)
+
+ def getfilectx(repo, memctx, f):
+ if lfutil.isstandin(f):
+ # if the file isn't in the manifest then it was removed
+ # or renamed, raise IOError to indicate this
+ srcfname = lfutil.splitstandin(f)
+ try:
+ fctx = ctx.filectx(srcfname)
+ except error.LookupError:
+ raise IOError
+ renamed = fctx.renamed()
+ if renamed:
+ # standin is always a largefile because largefile-ness
+ # doesn't change after rename or copy
+ renamed = lfutil.standin(renamed[0])
+
+ return context.memfilectx(f, lfiletohash[srcfname] + '\n',
+ 'l' in fctx.flags(), 'x' in fctx.flags(),
+ renamed)
+ else:
+ return _getnormalcontext(repo.ui, ctx, f, revmap)
+
+ # Commit
+ _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
+
+def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
+ mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
+ getfilectx, ctx.user(), ctx.date(), ctx.extra())
+ ret = rdst.commitctx(mctx)
+ rdst.setparents(ret)
+ revmap[ctx.node()] = rdst.changelog.tip()
+
+# Generate list of changed files
+def _getchangedfiles(ctx, parents):
+ files = set(ctx.files())
+ if node.nullid not in parents:
+ mc = ctx.manifest()
+ mp1 = ctx.parents()[0].manifest()
+ mp2 = ctx.parents()[1].manifest()
+ files |= (set(mp1) | set(mp2)) - set(mc)
+ for f in mc:
+ if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
+ files.add(f)
+ return files
+
+# Convert src parents to dst parents
+def _convertparents(ctx, revmap):
+ parents = []
+ for p in ctx.parents():
+ parents.append(revmap[p.node()])
+ while len(parents) < 2:
+ parents.append(node.nullid)
+ return parents
+
+# Get memfilectx for a normal file
+def _getnormalcontext(ui, ctx, f, revmap):
+ try:
+ fctx = ctx.filectx(f)
+ except error.LookupError:
+ raise IOError
+ renamed = fctx.renamed()
+ if renamed:
+ renamed = renamed[0]
+
+ data = fctx.data()
+ if f == '.hgtags':
+ data = _converttags(ui, revmap, data)
+ return context.memfilectx(f, data, 'l' in fctx.flags(),
+ 'x' in fctx.flags(), renamed)
+
+# Remap tag data using a revision map
+def _converttags(ui, revmap, data):
+ newdata = []
+ for line in data.splitlines():
+ try:
+ id, name = line.split(' ', 1)
+ except ValueError:
+ ui.warn(_('skipping incorrectly formatted tag %s\n')
+ % line)
+ continue
+ try:
+ newid = node.bin(id)
+ except TypeError:
+ ui.warn(_('skipping incorrectly formatted id %s\n')
+ % id)
+ continue
+ try:
+ newdata.append('%s %s\n' % (node.hex(revmap[newid]),
+ name))
+ except KeyError:
+ ui.warn(_('no mapping for id %s\n') % id)
+ continue
+ return ''.join(newdata)
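+
+# Editor's sketch (hypothetical nodes): given revmap = {old: new}, a
+# .hgtags line '<hex(old)> v1.0' becomes '<hex(new)> v1.0'; malformed
+# lines and ids absent from revmap are warned about and dropped.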
+
+def _islfile(file, ctx, matcher, size):
+ '''Return true if file should be considered a largefile, i.e.
+ matcher matches it or it is larger than size.'''
+ # never store special .hg* files as largefiles
+ if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
+ return False
+ if matcher and matcher(file):
+ return True
+ try:
+ return ctx.filectx(file).size() >= size * 1024 * 1024
+ except error.LookupError:
+ return False
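+
+# Editor's sketch: with size=10 and no matcher, a 12 MB file gives True
+# (12 * 1024 * 1024 >= 10 * 1024 * 1024), while .hgtags, .hgignore and
+# .hgsigs are always treated as normal files.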
+
+def uploadlfiles(ui, rsrc, rdst, files):
+ '''upload largefiles to the central store'''
+
+ if not files:
+ return
+
+ store = basestore._openstore(rsrc, rdst, put=True)
+
+ at = 0
+ ui.debug("sending statlfile command for %d largefiles\n" % len(files))
+ retval = store.exists(files)
+ files = filter(lambda h: not retval[h], files)
+ ui.debug("%d largefiles need to be uploaded\n" % len(files))
+
+ for hash in files:
+ ui.progress(_('uploading largefiles'), at, unit='largefile',
+ total=len(files))
+ source = lfutil.findfile(rsrc, hash)
+ if not source:
+ raise util.Abort(_('largefile %s missing from store'
+ ' (needs to be uploaded)') % hash)
+ # XXX check for errors here
+ store.put(source, hash)
+ at += 1
+ ui.progress(_('uploading largefiles'), None)
+
+def verifylfiles(ui, repo, all=False, contents=False):
+ '''Verify that every big file revision in the current changeset
+ exists in the central store. With --contents, also verify that
+ the contents of each big file revision are correct (SHA-1 hash
+ matches the revision ID). With --all, check every changeset in
+ this repository.'''
+ if all:
+ # Pass a list to the function rather than an iterator because we know a
+ # list will work.
+ revs = range(len(repo))
+ else:
+ revs = ['.']
+
+ store = basestore._openstore(repo)
+ return store.verify(revs, contents=contents)
+
+def cachelfiles(ui, repo, node, filelist=None):
+ '''cachelfiles ensures that all largefiles needed by the specified revision
+ are present in the repository's largefile cache.
+
+ returns a tuple (cached, missing). cached is the list of files downloaded
+ by this operation; missing is the list of files that were needed but could
+ not be found.'''
+ lfiles = lfutil.listlfiles(repo, node)
+ if filelist:
+ lfiles = set(lfiles) & set(filelist)
+ toget = []
+
+ for lfile in lfiles:
+ # If we are mid-merge, then we have to trust the standin that is in the
+ # working copy to have the correct hashvalue. This is because the
+ # original hg.merge() already updated the standin as part of the normal
+ # merge process -- we just have to update the largefile to match.
+ if (getattr(repo, "_ismerging", False) and
+ os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
+ expectedhash = lfutil.readstandin(repo, lfile)
+ else:
+ expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
+
+ # if it exists and its hash matches, it might have been locally
+ # modified before updating and the user chose 'local'. in this case,
+ # it will not be in any store, so don't look for it.
+ if ((not os.path.exists(repo.wjoin(lfile)) or
+ expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and
+ not lfutil.findfile(repo, expectedhash)):
+ toget.append((lfile, expectedhash))
+
+ if toget:
+ store = basestore._openstore(repo)
+ ret = store.get(toget)
+ return ret
+
+ return ([], [])
+
+def downloadlfiles(ui, repo, rev=None):
+ matchfn = scmutil.match(repo[None],
+ [repo.wjoin(lfutil.shortname)], {})
+ def prepare(ctx, fns):
+ pass
+ totalsuccess = 0
+ totalmissing = 0
+ for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
+ prepare):
+ success, missing = cachelfiles(ui, repo, ctx.node())
+ totalsuccess += len(success)
+ totalmissing += len(missing)
+ ui.status(_("%d additional largefiles cached\n") % totalsuccess)
+ if totalmissing > 0:
+ ui.status(_("%d largefiles failed to download\n") % totalmissing)
+ return totalsuccess, totalmissing
+
+def updatelfiles(ui, repo, filelist=None, printmessage=True):
+ wlock = repo.wlock()
+ try:
+ lfdirstate = lfutil.openlfdirstate(ui, repo)
+ lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
+
+ if filelist is not None:
+ lfiles = [f for f in lfiles if f in filelist]
+
+ printed = False
+ if printmessage and lfiles:
+ ui.status(_('getting changed largefiles\n'))
+ printed = True
+ cachelfiles(ui, repo, '.', lfiles)
+
+ updated, removed = 0, 0
+ for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles):
+ # increment the appropriate counter according to _updatelfile's
+ # return value
+ updated += i > 0 and i or 0
+ removed -= i < 0 and i or 0
+ if printmessage and (removed or updated) and not printed:
+ ui.status(_('getting changed largefiles\n'))
+ printed = True
+
+ lfdirstate.write()
+ if printed and printmessage:
+ ui.status(_('%d largefiles updated, %d removed\n') % (updated,
+ removed))
+ finally:
+ wlock.release()
+
+def _updatelfile(repo, lfdirstate, lfile):
+ '''updates a single largefile and copies the state of its standin from
+ the repository's dirstate to its state in the lfdirstate.
+
+ returns 1 if the file was modified, -1 if the file was removed, 0 if the
+ file was unchanged, and None if the needed largefile was missing from the
+ cache.'''
+ ret = 0
+ abslfile = repo.wjoin(lfile)
+ absstandin = repo.wjoin(lfutil.standin(lfile))
+ if os.path.exists(absstandin):
+ if os.path.exists(absstandin + '.orig'):
+ shutil.copyfile(abslfile, abslfile + '.orig')
+ expecthash = lfutil.readstandin(repo, lfile)
+ if (expecthash != '' and
+ (not os.path.exists(abslfile) or
+ expecthash != lfutil.hashfile(abslfile))):
+ if not lfutil.copyfromcache(repo, expecthash, lfile):
+ # use normallookup() to allocate an entry in the largefiles
+ # dirstate; without it, lfilesrepo.status() would mistakenly
+ # report such cache-missing files as REMOVED.
+ lfdirstate.normallookup(lfile)
+ return None # don't try to set the mode
+ else:
+ # Synchronize largefile dirstate to the last modified time of
+ # the file
+ lfdirstate.normal(lfile)
+ ret = 1
+ mode = os.stat(absstandin).st_mode
+ if mode != os.stat(abslfile).st_mode:
+ os.chmod(abslfile, mode)
+ ret = 1
+ else:
+ # Remove lfiles for which the standin is deleted, unless the
+ # lfile is added to the repository again. This happens when a
+ # largefile is converted back to a normal file: the standin
+ # disappears, but a new (normal) file appears as the lfile.
+ if os.path.exists(abslfile) and lfile not in repo[None]:
+ util.unlinkpath(abslfile)
+ ret = -1
+ state = repo.dirstate[lfutil.standin(lfile)]
+ if state == 'n':
+ # When rebasing, we need to synchronize the standin and the largefile,
+ # because otherwise the largefile will get reverted. But for commit's
+ # sake, we have to mark the file as unclean.
+ if getattr(repo, "_isrebasing", False):
+ lfdirstate.normallookup(lfile)
+ else:
+ lfdirstate.normal(lfile)
+ elif state == 'r':
+ lfdirstate.remove(lfile)
+ elif state == 'a':
+ lfdirstate.add(lfile)
+ elif state == '?':
+ lfdirstate.drop(lfile)
+ return ret
+
+def catlfile(repo, lfile, rev, filename):
+ hash = lfutil.readstandin(repo, lfile, rev)
+ if not lfutil.inusercache(repo.ui, hash):
+ store = basestore._openstore(repo)
+ success, missing = store.get([(lfile, hash)])
+ if len(success) != 1:
+ raise util.Abort(
+ _('largefile %s is not in cache and could not be downloaded')
+ % lfile)
+ path = lfutil.usercachepath(repo.ui, hash)
+ fpout = cmdutil.makefileobj(repo, filename)
+ fpin = open(path, "rb")
+ fpout.write(fpin.read())
+ fpout.close()
+ fpin.close()
+ return 0
+
+# -- hg commands declarations ------------------------------------------------
+
+cmdtable = {
+ 'lfconvert': (lfconvert,
+ [('s', 'size', '',
+ _('minimum size (MB) for files to be converted '
+ 'as largefiles'),
+ 'SIZE'),
+ ('', 'to-normal', False,
+ _('convert from a largefiles repo to a normal repo')),
+ ],
+ _('hg lfconvert SOURCE DEST [FILE ...]')),
+ }
diff --git a/hgext/largefiles/lfutil.py b/hgext/largefiles/lfutil.py
new file mode 100644
index 0000000..6a64d89
--- /dev/null
+++ b/hgext/largefiles/lfutil.py
@@ -0,0 +1,467 @@
+# Copyright 2009-2010 Gregory P. Ward
+# Copyright 2009-2010 Intelerad Medical Systems Incorporated
+# Copyright 2010-2011 Fog Creek Software
+# Copyright 2010-2011 Unity Technologies
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''largefiles utility code: must not import other modules in this package.'''
+
+import os
+import errno
+import platform
+import shutil
+import stat
+
+from mercurial import dirstate, httpconnection, match as match_, util, scmutil
+from mercurial.i18n import _
+
+shortname = '.hglf'
+longname = 'largefiles'
+
+
+# -- Portability wrappers ----------------------------------------------
+
+def dirstatewalk(dirstate, matcher, unknown=False, ignored=False):
+ return dirstate.walk(matcher, [], unknown, ignored)
+
+def repoadd(repo, list):
+ add = repo[None].add
+ return add(list)
+
+def reporemove(repo, list, unlink=False):
+ def remove(list, unlink):
+ wlock = repo.wlock()
+ try:
+ if unlink:
+ for f in list:
+ try:
+ util.unlinkpath(repo.wjoin(f))
+ except OSError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ repo[None].forget(list)
+ finally:
+ wlock.release()
+ return remove(list, unlink=unlink)
+
+def repoforget(repo, list):
+ forget = repo[None].forget
+ return forget(list)
+
+def findoutgoing(repo, remote, force):
+ from mercurial import discovery
+ common, _anyinc, _heads = discovery.findcommonincoming(repo,
+ remote.peer(), force=force)
+ return repo.changelog.findmissing(common)
+
+# -- Private worker functions ------------------------------------------
+
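+# getminsize computes the minimum size (in MB) above which files are
+# treated as largefiles; e.g. getminsize(ui, True, None) falls back to
+# the [largefiles] minsize config value, or to the default of 10 when
+# that is unset.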
+def getminsize(ui, assumelfiles, opt, default=10):
+ lfsize = opt
+ if not lfsize and assumelfiles:
+ lfsize = ui.config(longname, 'minsize', default=default)
+ if lfsize:
+ try:
+ lfsize = float(lfsize)
+ except ValueError:
+            raise util.Abort(_('largefiles: size must be a number (not %s)\n')
+ % lfsize)
+ if lfsize is None:
+ raise util.Abort(_('minimum size for largefiles must be specified'))
+ return lfsize
+
+def link(src, dest):
+ try:
+ util.oslink(src, dest)
+ except OSError:
+ # if hardlinks fail, fallback on atomic copy
+ dst = util.atomictempfile(dest)
+ for chunk in util.filechunkiter(open(src, 'rb')):
+ dst.write(chunk)
+ dst.close()
+ os.chmod(dest, os.stat(src).st_mode)
+
+def usercachepath(ui, hash):
+ path = ui.configpath(longname, 'usercache', None)
+ if path:
+ path = os.path.join(path, hash)
+ else:
+ if os.name == 'nt':
+ appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
+ if appdata:
+ path = os.path.join(appdata, longname, hash)
+ elif platform.system() == 'Darwin':
+ home = os.getenv('HOME')
+ if home:
+ path = os.path.join(home, 'Library', 'Caches',
+ longname, hash)
+ elif os.name == 'posix':
+ path = os.getenv('XDG_CACHE_HOME')
+ if path:
+ path = os.path.join(path, longname, hash)
+ else:
+ home = os.getenv('HOME')
+ if home:
+ path = os.path.join(home, '.cache', longname, hash)
+ else:
+ raise util.Abort(_('unknown operating system: %s\n') % os.name)
+ return path
+
+def inusercache(ui, hash):
+ path = usercachepath(ui, hash)
+ return path and os.path.exists(path)
+
+def findfile(repo, hash):
+ if instore(repo, hash):
+ repo.ui.note(_('found %s in store\n') % hash)
+ return storepath(repo, hash)
+ elif inusercache(repo.ui, hash):
+ repo.ui.note(_('found %s in system cache\n') % hash)
+ path = storepath(repo, hash)
+ util.makedirs(os.path.dirname(path))
+ link(usercachepath(repo.ui, hash), path)
+ return path
+ return None
+
+class largefilesdirstate(dirstate.dirstate):
+ def __getitem__(self, key):
+ return super(largefilesdirstate, self).__getitem__(unixpath(key))
+ def normal(self, f):
+ return super(largefilesdirstate, self).normal(unixpath(f))
+ def remove(self, f):
+ return super(largefilesdirstate, self).remove(unixpath(f))
+ def add(self, f):
+ return super(largefilesdirstate, self).add(unixpath(f))
+ def drop(self, f):
+ return super(largefilesdirstate, self).drop(unixpath(f))
+ def forget(self, f):
+ return super(largefilesdirstate, self).forget(unixpath(f))
+ def normallookup(self, f):
+ return super(largefilesdirstate, self).normallookup(unixpath(f))
+
+def openlfdirstate(ui, repo):
+ '''
+ Return a dirstate object that tracks largefiles: i.e. its root is
+ the repo root, but it is saved in .hg/largefiles/dirstate.
+ '''
+ admin = repo.join(longname)
+ opener = scmutil.opener(admin)
+ lfdirstate = largefilesdirstate(opener, ui, repo.root,
+ repo.dirstate._validate)
+
+ # If the largefiles dirstate does not exist, populate and create
+ # it. This ensures that we create it on the first meaningful
+ # largefiles operation in a new clone.
+ if not os.path.exists(os.path.join(admin, 'dirstate')):
+ util.makedirs(admin)
+ matcher = getstandinmatcher(repo)
+ for standin in dirstatewalk(repo.dirstate, matcher):
+ lfile = splitstandin(standin)
+ hash = readstandin(repo, lfile)
+ lfdirstate.normallookup(lfile)
+ try:
+ if hash == hashfile(repo.wjoin(lfile)):
+ lfdirstate.normal(lfile)
+ except OSError, err:
+ if err.errno != errno.ENOENT:
+ raise
+ return lfdirstate
+
+def lfdirstatestatus(lfdirstate, repo, rev):
+ match = match_.always(repo.root, repo.getcwd())
+ s = lfdirstate.status(match, [], False, False, False)
+ unsure, modified, added, removed, missing, unknown, ignored, clean = s
+ for lfile in unsure:
+ if repo[rev][standin(lfile)].data().strip() != \
+ hashfile(repo.wjoin(lfile)):
+ modified.append(lfile)
+ else:
+ clean.append(lfile)
+ lfdirstate.normal(lfile)
+ return (modified, added, removed, missing, unknown, ignored, clean)
+
+def listlfiles(repo, rev=None, matcher=None):
+ '''return a list of largefiles in the working copy or the
+ specified changeset'''
+
+ if matcher is None:
+ matcher = getstandinmatcher(repo)
+
+ # ignore unknown files in working directory
+ return [splitstandin(f)
+ for f in repo[rev].walk(matcher)
+ if rev is not None or repo.dirstate[f] != '?']
+
+def instore(repo, hash):
+ return os.path.exists(storepath(repo, hash))
+
+def storepath(repo, hash):
+ return repo.join(os.path.join(longname, hash))
+
+def copyfromcache(repo, hash, filename):
+ '''Copy the specified largefile from the repo or system cache to
+ filename in the repository. Return true on success or false if the
+    file was not found in either cache (which should not happen:
+ this is meant to be called only after ensuring that the needed
+ largefile exists in the cache).'''
+ path = findfile(repo, hash)
+ if path is None:
+ return False
+ util.makedirs(os.path.dirname(repo.wjoin(filename)))
+ # The write may fail before the file is fully written, but we
+ # don't use atomic writes in the working copy.
+ shutil.copy(path, repo.wjoin(filename))
+ return True
+
+def copytostore(repo, rev, file, uploaded=False):
+ hash = readstandin(repo, file)
+ if instore(repo, hash):
+ return
+ copytostoreabsolute(repo, repo.wjoin(file), hash)
+
+def copyalltostore(repo, node):
+ '''Copy all largefiles in a given revision to the store'''
+
+ ctx = repo[node]
+ for filename in ctx.files():
+ if isstandin(filename) and filename in ctx.manifest():
+ realfile = splitstandin(filename)
+ copytostore(repo, ctx.node(), realfile)
+
+
+def copytostoreabsolute(repo, file, hash):
+ util.makedirs(os.path.dirname(storepath(repo, hash)))
+ if inusercache(repo.ui, hash):
+ link(usercachepath(repo.ui, hash), storepath(repo, hash))
+ else:
+ dst = util.atomictempfile(storepath(repo, hash),
+ createmode=repo.store.createmode)
+ for chunk in util.filechunkiter(open(file, 'rb')):
+ dst.write(chunk)
+ dst.close()
+ linktousercache(repo, hash)
+
+def linktousercache(repo, hash):
+ path = usercachepath(repo.ui, hash)
+ if path:
+ util.makedirs(os.path.dirname(path))
+ link(storepath(repo, hash), path)
+
+def getstandinmatcher(repo, pats=[], opts={}):
+ '''Return a match object that applies pats to the standin directory'''
+ standindir = repo.pathto(shortname)
+ if pats:
+ # patterns supplied: search standin directory relative to current dir
+ cwd = repo.getcwd()
+ if os.path.isabs(cwd):
+ # cwd is an absolute path for hg -R <reponame>
+ # work relative to the repository root in this case
+ cwd = ''
+ pats = [os.path.join(standindir, cwd, pat) for pat in pats]
+ elif os.path.isdir(standindir):
+ # no patterns: relative to repo root
+ pats = [standindir]
+ else:
+ # no patterns and no standin dir: return matcher that matches nothing
+ match = match_.match(repo.root, None, [], exact=True)
+ match.matchfn = lambda f: False
+ return match
+ return getmatcher(repo, pats, opts, showbad=False)
+
+def getmatcher(repo, pats=[], opts={}, showbad=True):
+ '''Wrapper around scmutil.match() that adds showbad: if false,
+ neuter the match object's bad() method so it does not print any
+ warnings about missing files or directories.'''
+ match = scmutil.match(repo[None], pats, opts)
+
+ if not showbad:
+ match.bad = lambda f, msg: None
+ return match
+
+def composestandinmatcher(repo, rmatcher):
+ '''Return a matcher that accepts standins corresponding to the
+ files accepted by rmatcher. Pass the list of files in the matcher
+ as the paths specified by the user.'''
+ smatcher = getstandinmatcher(repo, rmatcher.files())
+ isstandin = smatcher.matchfn
+ def composedmatchfn(f):
+ return isstandin(f) and rmatcher.matchfn(splitstandin(f))
+ smatcher.matchfn = composedmatchfn
+
+ return smatcher
+
+def standin(filename):
+ '''Return the repo-relative path to the standin for the specified big
+ file.'''
+ # Notes:
+ # 1) Most callers want an absolute path, but _createstandin() needs
+ # it repo-relative so lfadd() can pass it to repoadd(). So leave
+ # it up to the caller to use repo.wjoin() to get an absolute path.
+ # 2) Join with '/' because that's what dirstate always uses, even on
+ # Windows. Change existing separator to '/' first in case we are
+ # passed filenames from an external source (like the command line).
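+    # For example, standin('foo/bar.dat') (or standin(r'foo\bar.dat') on
+    # Windows) yields '.hglf/foo/bar.dat'.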
+ return shortname + '/' + util.pconvert(filename)
+
+def isstandin(filename):
+ '''Return true if filename is a big file standin. filename must be
+ in Mercurial's internal form (slash-separated).'''
+ return filename.startswith(shortname + '/')
+
+def splitstandin(filename):
+ # Split on / because that's what dirstate always uses, even on Windows.
+ # Change local separator to / first just in case we are passed filenames
+ # from an external source (like the command line).
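+    # e.g. splitstandin('.hglf/foo/bar.dat') returns 'foo/bar.dat', while
+    # splitstandin('foo/bar.dat') returns None.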
+ bits = util.pconvert(filename).split('/', 1)
+ if len(bits) == 2 and bits[0] == shortname:
+ return bits[1]
+ else:
+ return None
+
+def updatestandin(repo, standin):
+ file = repo.wjoin(splitstandin(standin))
+ if os.path.exists(file):
+ hash = hashfile(file)
+ executable = getexecutable(file)
+ writestandin(repo, standin, hash, executable)
+
+def readstandin(repo, filename, node=None):
+ '''read hex hash from standin for filename at given node, or working
+ directory if no node is given'''
+ return repo[node][standin(filename)].data().strip()
+
+def writestandin(repo, standin, hash, executable):
+ '''write hash to <repo.root>/<standin>'''
+ writehash(hash, repo.wjoin(standin), executable)
+
+def copyandhash(instream, outfile):
+ '''Read bytes from instream (iterable) and write them to outfile,
+ computing the SHA-1 hash of the data along the way. Close outfile
+ when done and return the binary hash.'''
+ hasher = util.sha1('')
+ for data in instream:
+ hasher.update(data)
+ outfile.write(data)
+
+ # Blecch: closing a file that somebody else opened is rude and
+ # wrong. But it's so darn convenient and practical! After all,
+ # outfile was opened just to copy and hash.
+ outfile.close()
+
+ return hasher.digest()
+
+def hashrepofile(repo, file):
+ return hashfile(repo.wjoin(file))
+
+def hashfile(file):
+ if not os.path.exists(file):
+ return ''
+ hasher = util.sha1('')
+ fd = open(file, 'rb')
+ for data in blockstream(fd):
+ hasher.update(data)
+ fd.close()
+ return hasher.hexdigest()
+
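+# Wrap a file-like object so that at most `limit` bytes can be read from
+# it; e.g. limitreader(fd, 1024).read(4096) returns at most 1024 bytes,
+# and subsequent reads return ''.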
+class limitreader(object):
+ def __init__(self, f, limit):
+ self.f = f
+ self.limit = limit
+
+ def read(self, length):
+ if self.limit == 0:
+ return ''
+        length = min(length, self.limit)
+ self.limit -= length
+ return self.f.read(length)
+
+ def close(self):
+ pass
+
+def blockstream(infile, blocksize=128 * 1024):
+ """Generator that yields blocks of data from infile and closes infile."""
+ while True:
+ data = infile.read(blocksize)
+ if not data:
+ break
+ yield data
+ # same blecch as copyandhash() above
+ infile.close()
+
+def writehash(hash, filename, executable):
+ util.makedirs(os.path.dirname(filename))
+ util.writefile(filename, hash + '\n')
+ os.chmod(filename, getmode(executable))
+
+def getexecutable(filename):
+ mode = os.stat(filename).st_mode
+ return ((mode & stat.S_IXUSR) and
+ (mode & stat.S_IXGRP) and
+ (mode & stat.S_IXOTH))
+
+def getmode(executable):
+ if executable:
+ return 0755
+ else:
+ return 0644
+
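+# Join URL components with single slashes; e.g.
+# urljoin('http://example.com/', '/store', 'abc') returns
+# 'http://example.com/store/abc'.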
+def urljoin(first, second, *arg):
+ def join(left, right):
+ if not left.endswith('/'):
+ left += '/'
+ if right.startswith('/'):
+ right = right[1:]
+ return left + right
+
+ url = join(first, second)
+ for a in arg:
+ url = join(url, a)
+ return url
+
+def hexsha1(data):
+ """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
+ object data"""
+ h = util.sha1()
+ for chunk in util.filechunkiter(data):
+ h.update(chunk)
+ return h.hexdigest()
+
+def httpsendfile(ui, filename):
+ return httpconnection.httpsendfile(ui, filename, 'rb')
+
+def unixpath(path):
+ '''Return a version of path normalized for use with the lfdirstate.'''
+ return util.pconvert(os.path.normpath(path))
+
+def islfilesrepo(repo):
+ return ('largefiles' in repo.requirements and
+ util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))
+
+class storeprotonotcapable(Exception):
+ def __init__(self, storetypes):
+ self.storetypes = storetypes
+
+def getcurrentheads(repo):
+ branches = repo.branchmap()
+ heads = []
+ for branch in branches:
+ newheads = repo.branchheads(branch)
+ heads = heads + newheads
+ return heads
+
+def getstandinsstate(repo):
+ standins = []
+ matcher = getstandinmatcher(repo)
+ for standin in dirstatewalk(repo.dirstate, matcher):
+ lfile = splitstandin(standin)
+ standins.append((lfile, readstandin(repo, lfile)))
+ return standins
+
+def getlfilestoupdate(oldstandins, newstandins):
+ changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
+ filelist = []
+ for f in changedstandins:
+ if f[0] not in filelist:
+ filelist.append(f[0])
+ return filelist
diff --git a/hgext/largefiles/localstore.py b/hgext/largefiles/localstore.py
new file mode 100644
index 0000000..4995743
--- /dev/null
+++ b/hgext/largefiles/localstore.py
@@ -0,0 +1,82 @@
+# Copyright 2009-2010 Gregory P. Ward
+# Copyright 2009-2010 Intelerad Medical Systems Incorporated
+# Copyright 2010-2011 Fog Creek Software
+# Copyright 2010-2011 Unity Technologies
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''store class for local filesystem'''
+
+import os
+
+from mercurial import util
+from mercurial.i18n import _
+
+import lfutil
+import basestore
+
+class localstore(basestore.basestore):
+ '''localstore first attempts to grab files out of the store in the remote
+    Mercurial repository. Failing that, it attempts to grab the files from
+ the user cache.'''
+
+ def __init__(self, ui, repo, remote):
+ url = os.path.join(remote.local().path, '.hg', lfutil.longname)
+ super(localstore, self).__init__(ui, repo, util.expandpath(url))
+ self.remote = remote.local()
+
+ def put(self, source, hash):
+ util.makedirs(os.path.dirname(lfutil.storepath(self.remote, hash)))
+ if lfutil.instore(self.remote, hash):
+ return
+ lfutil.link(lfutil.storepath(self.repo, hash),
+ lfutil.storepath(self.remote, hash))
+
+ def exists(self, hash):
+ return lfutil.instore(self.remote, hash)
+
+ def _getfile(self, tmpfile, filename, hash):
+ if lfutil.instore(self.remote, hash):
+ path = lfutil.storepath(self.remote, hash)
+ elif lfutil.inusercache(self.ui, hash):
+ path = lfutil.usercachepath(self.ui, hash)
+ else:
+ raise basestore.StoreError(filename, hash, '',
+ _("can't get file locally"))
+ fd = open(path, 'rb')
+ try:
+ return lfutil.copyandhash(fd, tmpfile)
+ finally:
+ fd.close()
+
+ def _verifyfile(self, cctx, cset, contents, standin, verified):
+ filename = lfutil.splitstandin(standin)
+ if not filename:
+ return False
+ fctx = cctx[standin]
+ key = (filename, fctx.filenode())
+ if key in verified:
+ return False
+
+ expecthash = fctx.data()[0:40]
+ verified.add(key)
+ if not lfutil.instore(self.remote, expecthash):
+ self.ui.warn(
+ _('changeset %s: %s missing\n'
+ ' (looked for hash %s)\n')
+ % (cset, filename, expecthash))
+ return True # failed
+
+ if contents:
+ storepath = lfutil.storepath(self.remote, expecthash)
+ actualhash = lfutil.hashfile(storepath)
+ if actualhash != expecthash:
+ self.ui.warn(
+ _('changeset %s: %s: contents differ\n'
+ ' (%s:\n'
+ ' expected hash %s,\n'
+ ' but got %s)\n')
+ % (cset, filename, storepath, expecthash, actualhash))
+ return True # failed
+ return False
diff --git a/hgext/largefiles/overrides.py b/hgext/largefiles/overrides.py
new file mode 100644
index 0000000..3b42695
--- /dev/null
+++ b/hgext/largefiles/overrides.py
@@ -0,0 +1,1080 @@
+# Copyright 2009-2010 Gregory P. Ward
+# Copyright 2009-2010 Intelerad Medical Systems Incorporated
+# Copyright 2010-2011 Fog Creek Software
+# Copyright 2010-2011 Unity Technologies
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''Overridden Mercurial commands and functions for the largefiles extension'''
+
+import os
+import copy
+
+from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
+ node, archival, error, merge
+from mercurial.i18n import _
+from mercurial.node import hex
+from hgext import rebase
+
+import lfutil
+import lfcommands
+
+# -- Utility functions: commonly/repeatedly needed functionality ---------------
+
+def installnormalfilesmatchfn(manifest):
+ '''overrides scmutil.match so that the matcher it returns will ignore all
+ largefiles'''
+ oldmatch = None # for the closure
+ def overridematch(ctx, pats=[], opts={}, globbed=False,
+ default='relpath'):
+ match = oldmatch(ctx, pats, opts, globbed, default)
+ m = copy.copy(match)
+ notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
+ manifest)
+ m._files = filter(notlfile, m._files)
+ m._fmap = set(m._files)
+ origmatchfn = m.matchfn
+ m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
+ return m
+ oldmatch = installmatchfn(overridematch)
+
+def installmatchfn(f):
+ oldmatch = scmutil.match
+ setattr(f, 'oldmatch', oldmatch)
+ scmutil.match = f
+ return oldmatch
+
+def restorematchfn():
+ '''restores scmutil.match to what it was before installnormalfilesmatchfn
+    was called. No-op if scmutil.match is its original function.
+
+    Note that n calls to installnormalfilesmatchfn will require n calls to
+    restorematchfn to reverse.'''
+ scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
+
+def addlargefiles(ui, repo, *pats, **opts):
+ large = opts.pop('large', None)
+ lfsize = lfutil.getminsize(
+ ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
+
+ lfmatcher = None
+ if lfutil.islfilesrepo(repo):
+ lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
+ if lfpats:
+ lfmatcher = match_.match(repo.root, '', list(lfpats))
+
+ lfnames = []
+ m = scmutil.match(repo[None], pats, opts)
+ m.bad = lambda x, y: None
+ wctx = repo[None]
+ for f in repo.walk(m):
+ exact = m.exact(f)
+ lfile = lfutil.standin(f) in wctx
+ nfile = f in wctx
+ exists = lfile or nfile
+
+ # Don't warn the user when they attempt to add a normal tracked file.
+ # The normal add code will do that for us.
+ if exact and exists:
+ if lfile:
+ ui.warn(_('%s already a largefile\n') % f)
+ continue
+
+ if (exact or not exists) and not lfutil.isstandin(f):
+ wfile = repo.wjoin(f)
+
+ # In case the file was removed previously, but not committed
+ # (issue3507)
+ if not os.path.exists(wfile):
+ continue
+
+ abovemin = (lfsize and
+ os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
+ if large or abovemin or (lfmatcher and lfmatcher(f)):
+ lfnames.append(f)
+ if ui.verbose or not exact:
+ ui.status(_('adding %s as a largefile\n') % m.rel(f))
+
+ bad = []
+ standins = []
+
+ # Need to lock, otherwise there could be a race condition between
+ # when standins are created and added to the repo.
+ wlock = repo.wlock()
+ try:
+ if not opts.get('dry_run'):
+ lfdirstate = lfutil.openlfdirstate(ui, repo)
+ for f in lfnames:
+ standinname = lfutil.standin(f)
+ lfutil.writestandin(repo, standinname, hash='',
+ executable=lfutil.getexecutable(repo.wjoin(f)))
+ standins.append(standinname)
+ if lfdirstate[f] == 'r':
+ lfdirstate.normallookup(f)
+ else:
+ lfdirstate.add(f)
+ lfdirstate.write()
+ bad += [lfutil.splitstandin(f)
+ for f in lfutil.repoadd(repo, standins)
+ if f in m.files()]
+ finally:
+ wlock.release()
+ return bad
+
+def removelargefiles(ui, repo, *pats, **opts):
+ after = opts.get('after')
+ if not pats and not after:
+ raise util.Abort(_('no files specified'))
+ m = scmutil.match(repo[None], pats, opts)
+ try:
+ repo.lfstatus = True
+ s = repo.status(match=m, clean=True)
+ finally:
+ repo.lfstatus = False
+ manifest = repo[None].manifest()
+ modified, added, deleted, clean = [[f for f in list
+ if lfutil.standin(f) in manifest]
+ for list in [s[0], s[1], s[3], s[6]]]
+
+ def warn(files, reason):
+ for f in files:
+ ui.warn(_('not removing %s: %s (use forget to undo)\n')
+ % (m.rel(f), reason))
+
+ if after:
+ remove, forget = deleted, []
+ warn(modified + added + clean, _('file still exists'))
+ else:
+ remove, forget = deleted + clean, []
+ warn(modified, _('file is modified'))
+ warn(added, _('file has been marked for add'))
+
+ for f in sorted(remove + forget):
+ if ui.verbose or not m.exact(f):
+ ui.status(_('removing %s\n') % m.rel(f))
+
+ # Need to lock because standin files are deleted then removed from the
+    # repository and we could race in between.
+ wlock = repo.wlock()
+ try:
+ lfdirstate = lfutil.openlfdirstate(ui, repo)
+ for f in remove:
+ if not after:
+ # If this is being called by addremove, notify the user that we
+ # are removing the file.
+ if getattr(repo, "_isaddremove", False):
+ ui.status(_('removing %s\n') % f)
+ if os.path.exists(repo.wjoin(f)):
+ util.unlinkpath(repo.wjoin(f))
+ lfdirstate.remove(f)
+ lfdirstate.write()
+ forget = [lfutil.standin(f) for f in forget]
+ remove = [lfutil.standin(f) for f in remove]
+ lfutil.repoforget(repo, forget)
+ # If this is being called by addremove, let the original addremove
+ # function handle this.
+ if not getattr(repo, "_isaddremove", False):
+ lfutil.reporemove(repo, remove, unlink=True)
+ else:
+ lfutil.reporemove(repo, remove, unlink=False)
+ finally:
+ wlock.release()
+
+# For overriding mercurial.hgweb.webcommands so that largefiles will
+# appear in the right place in the manifests.
+def decodepath(orig, path):
+ return lfutil.splitstandin(path) or path
+
+# -- Wrappers: modify existing commands --------------------------------
+
+# Add works by going through the files that the user wanted to add and
+# checking if they should be added as largefiles. Then it makes a new
+# matcher which matches only the normal files and runs the original
+# version of add.
+def overrideadd(orig, ui, repo, *pats, **opts):
+ normal = opts.pop('normal')
+ if normal:
+ if opts.get('large'):
+ raise util.Abort(_('--normal cannot be used with --large'))
+ return orig(ui, repo, *pats, **opts)
+ bad = addlargefiles(ui, repo, *pats, **opts)
+ installnormalfilesmatchfn(repo[None].manifest())
+ result = orig(ui, repo, *pats, **opts)
+ restorematchfn()
+
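+    # Report failure (1) if the normal add failed or if any largefile
+    # standin could not be added.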
+ return (result == 1 or bad) and 1 or 0
+
+def overrideremove(orig, ui, repo, *pats, **opts):
+ installnormalfilesmatchfn(repo[None].manifest())
+ orig(ui, repo, *pats, **opts)
+ restorematchfn()
+ removelargefiles(ui, repo, *pats, **opts)
+
+def overridestatusfn(orig, repo, rev2, **opts):
+ try:
+ repo._repo.lfstatus = True
+ return orig(repo, rev2, **opts)
+ finally:
+ repo._repo.lfstatus = False
+
+def overridestatus(orig, ui, repo, *pats, **opts):
+ try:
+ repo.lfstatus = True
+ return orig(ui, repo, *pats, **opts)
+ finally:
+ repo.lfstatus = False
+
+def overridedirty(orig, repo, ignoreupdate=False):
+ try:
+ repo._repo.lfstatus = True
+ return orig(repo, ignoreupdate)
+ finally:
+ repo._repo.lfstatus = False
+
+def overridelog(orig, ui, repo, *pats, **opts):
+ try:
+ repo.lfstatus = True
+ orig(ui, repo, *pats, **opts)
+ finally:
+ repo.lfstatus = False
+
+def overrideverify(orig, ui, repo, *pats, **opts):
+ large = opts.pop('large', False)
+ all = opts.pop('lfa', False)
+ contents = opts.pop('lfc', False)
+
+ result = orig(ui, repo, *pats, **opts)
+ if large:
+ result = result or lfcommands.verifylfiles(ui, repo, all, contents)
+ return result
+
+# Override needs to refresh standins so that update's normal merge
+# will go through properly. Then the other update hook (overriding repo.update)
+# will get the new files. Filemerge is also overridden so that the merge
+# will merge standins correctly.
+def overrideupdate(orig, ui, repo, *pats, **opts):
+ lfdirstate = lfutil.openlfdirstate(ui, repo)
+ s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
+ False, False)
+ (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
+
+ # Need to lock between the standins getting updated and their
+ # largefiles getting updated
+ wlock = repo.wlock()
+ try:
+ if opts['check']:
+ mod = len(modified) > 0
+ for lfile in unsure:
+ standin = lfutil.standin(lfile)
+ if repo['.'][standin].data().strip() != \
+ lfutil.hashfile(repo.wjoin(lfile)):
+ mod = True
+ else:
+ lfdirstate.normal(lfile)
+ lfdirstate.write()
+ if mod:
+ raise util.Abort(_('uncommitted local changes'))
+ # XXX handle removed differently
+ if not opts['clean']:
+ for lfile in unsure + modified + added:
+ lfutil.updatestandin(repo, lfutil.standin(lfile))
+ finally:
+ wlock.release()
+ return orig(ui, repo, *pats, **opts)
+
+# Before starting the manifest merge, merge.updates will call
+# _checkunknown to check if there are any files in the merged-in
+# changeset that collide with unknown files in the working copy.
+#
+# The largefiles are seen as unknown, so this prevents us from merging
+# in a file 'foo' if we already have a largefile with the same name.
+#
+# The overridden function filters the unknown files by removing any
+# largefiles. This makes the merge proceed and we can then handle this
+# case further in the overridden manifestmerge function below.
+def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
+ if lfutil.standin(f) in wctx:
+ return False
+ return origfn(repo, wctx, mctx, f)
+
+# The manifest merge handles conflicts on the manifest level. We want
+# to handle changes in largefile-ness of files at this level too.
+#
+# The strategy is to run the original manifestmerge and then process
+# the action list it outputs. There are two cases we need to deal with:
+#
+# 1. Normal file in p1, largefile in p2. Here the largefile is
+# detected via its standin file, which will enter the working copy
+# with a "get" action. It is not "merge" since the standin is all
+# Mercurial is concerned with at this level -- the link to the
+# existing normal file is not relevant here.
+#
+# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
+# since the largefile will be present in the working copy and
+# different from the normal file in p2. Mercurial therefore
+# triggers a merge action.
+#
+# In both cases, we prompt the user and emit new actions to either
+# remove the standin (if the normal file was kept) or to remove the
+# normal file and get the standin (if the largefile was kept). The
+# default prompt answer is to use the largefile version since it was
+# presumably changed on purpose.
+#
+# Finally, the merge.applyupdates function will then take care of
+# writing the files into the working copy and lfcommands.updatelfiles
+# will update the largefiles.
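+#
+# For example, when '.hglf/big.dat' arrives with a "g" action from the
+# second parent while a normal 'big.dat' is tracked in the first, the
+# prompt below rewrites this into a remove of one of the two paths
+# (plus a get of the standin when the largefile is kept).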
+def overridemanifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
+ actions = origfn(repo, p1, p2, pa, overwrite, partial)
+ processed = []
+
+ for action in actions:
+ if overwrite:
+ processed.append(action)
+ continue
+ f, m = action[:2]
+
+ choices = (_('&Largefile'), _('&Normal file'))
+ if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
+ # Case 1: normal file in the working copy, largefile in
+ # the second parent
+ lfile = lfutil.splitstandin(f)
+ standin = f
+ msg = _('%s has been turned into a largefile\n'
+ 'use (l)argefile or keep as (n)ormal file?') % lfile
+ if repo.ui.promptchoice(msg, choices, 0) == 0:
+ processed.append((lfile, "r"))
+ processed.append((standin, "g", p2.flags(standin)))
+ else:
+ processed.append((standin, "r"))
+ elif m == "g" and lfutil.standin(f) in p1 and f in p2:
+ # Case 2: largefile in the working copy, normal file in
+ # the second parent
+ standin = lfutil.standin(f)
+ lfile = f
+ msg = _('%s has been turned into a normal file\n'
+ 'keep as (l)argefile or use (n)ormal file?') % lfile
+ if repo.ui.promptchoice(msg, choices, 0) == 0:
+ processed.append((lfile, "r"))
+ else:
+ processed.append((standin, "r"))
+ processed.append((lfile, "g", p2.flags(lfile)))
+ else:
+ processed.append(action)
+
+ return processed
+
+# Override filemerge to prompt the user about how they wish to merge
+# largefiles. Identical edits and copy/rename plus edit are handled
+# without prompting the user.
+def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
+ # Use better variable names here. Because this is a wrapper we cannot
+ # change the variable names in the function declaration.
+ fcdest, fcother, fcancestor = fcd, fco, fca
+ if not lfutil.isstandin(orig):
+ return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
+ else:
+ if not fcother.cmp(fcdest): # files identical?
+ return None
+
+ # backwards, use working dir parent as ancestor
+ if fcancestor == fcother:
+ fcancestor = fcdest.parents()[0]
+
+ if orig != fcother.path():
+ repo.ui.status(_('merging %s and %s to %s\n')
+ % (lfutil.splitstandin(orig),
+ lfutil.splitstandin(fcother.path()),
+ lfutil.splitstandin(fcdest.path())))
+ else:
+ repo.ui.status(_('merging %s\n')
+ % lfutil.splitstandin(fcdest.path()))
+
+ if fcancestor.path() != fcother.path() and fcother.data() == \
+ fcancestor.data():
+ return 0
+ if fcancestor.path() != fcdest.path() and fcdest.data() == \
+ fcancestor.data():
+ repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
+ return 0
+
+ if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
+ 'keep (l)ocal or take (o)ther?') %
+ lfutil.splitstandin(orig),
+ (_('&Local'), _('&Other')), 0) == 0:
+ return 0
+ else:
+ repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
+ return 0
+
+# Copy first changes the matchers to match standins instead of
+# largefiles. Then it overrides util.copyfile so that it checks whether
+# the destination largefile already exists. It also keeps a
+# list of copied files so that the largefiles can be copied and the
+# dirstate updated.
+def overridecopy(orig, ui, repo, pats, opts, rename=False):
+ # doesn't remove largefile on rename
+ if len(pats) < 2:
+ # this isn't legal, let the original function deal with it
+ return orig(ui, repo, pats, opts, rename)
+
+ def makestandin(relpath):
+ path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
+ return os.path.join(repo.wjoin(lfutil.standin(path)))
+
+ fullpats = scmutil.expandpats(pats)
+ dest = fullpats[-1]
+
+ if os.path.isdir(dest):
+ if not os.path.isdir(makestandin(dest)):
+ os.makedirs(makestandin(dest))
+ # This could copy both lfiles and normal files in one command,
+ # but we don't want to do that. First replace their matcher to
+ # only match normal files and run it, then replace it to just
+ # match largefiles and run it again.
+ nonormalfiles = False
+ nolfiles = False
+ try:
+ try:
+ installnormalfilesmatchfn(repo[None].manifest())
+ result = orig(ui, repo, pats, opts, rename)
+ except util.Abort, e:
+ if str(e) != _('no files to copy'):
+ raise e
+ else:
+ nonormalfiles = True
+ result = 0
+ finally:
+ restorematchfn()
+
+ # The first rename can cause our current working directory to be removed.
+ # In that case there is nothing left to copy/rename so just quit.
+ try:
+ repo.getcwd()
+ except OSError:
+ return result
+
+ try:
+ try:
+            # When we call orig below, it creates the standins, but we don't
+            # add them to the dirstate until later, so lock during that time.
+ wlock = repo.wlock()
+
+ manifest = repo[None].manifest()
+ oldmatch = None # for the closure
+ def overridematch(ctx, pats=[], opts={}, globbed=False,
+ default='relpath'):
+ newpats = []
+ # The patterns were previously mangled to add the standin
+ # directory; we need to remove that now
+ for pat in pats:
+ if match_.patkind(pat) is None and lfutil.shortname in pat:
+ newpats.append(pat.replace(lfutil.shortname, ''))
+ else:
+ newpats.append(pat)
+ match = oldmatch(ctx, newpats, opts, globbed, default)
+ m = copy.copy(match)
+ lfile = lambda f: lfutil.standin(f) in manifest
+ m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
+ m._fmap = set(m._files)
+ origmatchfn = m.matchfn
+ m.matchfn = lambda f: (lfutil.isstandin(f) and
+ (f in manifest) and
+ origmatchfn(lfutil.splitstandin(f)) or
+ None)
+ return m
+ oldmatch = installmatchfn(overridematch)
+ listpats = []
+ for pat in pats:
+ if match_.patkind(pat) is not None:
+ listpats.append(pat)
+ else:
+ listpats.append(makestandin(pat))
+
+ try:
+ origcopyfile = util.copyfile
+ copiedfiles = []
+ def overridecopyfile(src, dest):
+ if (lfutil.shortname in src and
+ dest.startswith(repo.wjoin(lfutil.shortname))):
+ destlfile = dest.replace(lfutil.shortname, '')
+ if not opts['force'] and os.path.exists(destlfile):
+ raise IOError('',
+ _('destination largefile already exists'))
+ copiedfiles.append((src, dest))
+ origcopyfile(src, dest)
+
+ util.copyfile = overridecopyfile
+ result += orig(ui, repo, listpats, opts, rename)
+ finally:
+ util.copyfile = origcopyfile
+
+ lfdirstate = lfutil.openlfdirstate(ui, repo)
+ for (src, dest) in copiedfiles:
+ if (lfutil.shortname in src and
+ dest.startswith(repo.wjoin(lfutil.shortname))):
+ srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
+ destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
+ destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
+ if not os.path.isdir(destlfiledir):
+ os.makedirs(destlfiledir)
+ if rename:
+ os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
+ lfdirstate.remove(srclfile)
+ else:
+ util.copyfile(repo.wjoin(srclfile),
+ repo.wjoin(destlfile))
+
+ lfdirstate.add(destlfile)
+ lfdirstate.write()
+ except util.Abort, e:
+ if str(e) != _('no files to copy'):
+ raise e
+ else:
+ nolfiles = True
+ finally:
+ restorematchfn()
+ wlock.release()
+
+ if nolfiles and nonormalfiles:
+ raise util.Abort(_('no files to copy'))
+
+ return result
+
+# When the user calls revert, we have to be careful to not revert any
+# changes to other largefiles accidentally. This means we have to keep
+# track of the largefiles that are being reverted so we only pull down
+# the necessary largefiles.
+#
+# Standins are only updated (to match the hash of largefiles) before
+# commits. Update the standins, then run the original revert, changing
+# the matcher to hit standins instead of largefiles. Based on the
+# resulting standins, update the largefiles. Then return the standins
+# to their proper state.
+def overriderevert(orig, ui, repo, *pats, **opts):
+ # Because we put the standins in a bad state (by updating them)
+ # and then return them to a correct state we need to lock to
+ # prevent others from changing them in their incorrect state.
+ wlock = repo.wlock()
+ try:
+ lfdirstate = lfutil.openlfdirstate(ui, repo)
+ (modified, added, removed, missing, unknown, ignored, clean) = \
+ lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
+ for lfile in modified:
+ lfutil.updatestandin(repo, lfutil.standin(lfile))
+ for lfile in missing:
+ if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
+ os.unlink(repo.wjoin(lfutil.standin(lfile)))
+
+ try:
+ ctx = scmutil.revsingle(repo, opts.get('rev'))
+ oldmatch = None # for the closure
+ def overridematch(ctx, pats=[], opts={}, globbed=False,
+ default='relpath'):
+ match = oldmatch(ctx, pats, opts, globbed, default)
+ m = copy.copy(match)
+ def tostandin(f):
+ if lfutil.standin(f) in ctx:
+ return lfutil.standin(f)
+ elif lfutil.standin(f) in repo[None]:
+ return None
+ return f
+ m._files = [tostandin(f) for f in m._files]
+ m._files = [f for f in m._files if f is not None]
+ m._fmap = set(m._files)
+ origmatchfn = m.matchfn
+ def matchfn(f):
+ if lfutil.isstandin(f):
+ # We need to keep track of what largefiles are being
+ # matched so we know which ones to update later --
+ # otherwise we accidentally revert changes to other
+ # largefiles. This is repo-specific, so duckpunch the
+ # repo object to keep the list of largefiles for us
+ # later.
+ if origmatchfn(lfutil.splitstandin(f)) and \
+ (f in repo[None] or f in ctx):
+ lfileslist = getattr(repo, '_lfilestoupdate', [])
+ lfileslist.append(lfutil.splitstandin(f))
+ repo._lfilestoupdate = lfileslist
+ return True
+ else:
+ return False
+ return origmatchfn(f)
+ m.matchfn = matchfn
+ return m
+ oldmatch = installmatchfn(overridematch)
+ matches = overridematch(repo[None], pats, opts)
+ orig(ui, repo, *pats, **opts)
+ finally:
+ restorematchfn()
+ lfileslist = getattr(repo, '_lfilestoupdate', [])
+ lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
+ printmessage=False)
+
+ # empty out the largefiles list so we start fresh next time
+ repo._lfilestoupdate = []
+ for lfile in modified:
+ if lfile in lfileslist:
+ if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
+ in repo['.']:
+ lfutil.writestandin(repo, lfutil.standin(lfile),
+ repo['.'][lfile].data().strip(),
+ 'x' in repo['.'][lfile].flags())
+ lfdirstate = lfutil.openlfdirstate(ui, repo)
+ for lfile in added:
+ standin = lfutil.standin(lfile)
+ if standin not in ctx and (standin in matches or opts.get('all')):
+ if lfile in lfdirstate:
+ lfdirstate.drop(lfile)
+ util.unlinkpath(repo.wjoin(standin))
+ lfdirstate.write()
+ finally:
+ wlock.release()
+
+def hgupdate(orig, repo, node):
+    # Only call updatelfiles on the standins that have changed, to save time
+ oldstandins = lfutil.getstandinsstate(repo)
+ result = orig(repo, node)
+ newstandins = lfutil.getstandinsstate(repo)
+ filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
+ lfcommands.updatelfiles(repo.ui, repo, filelist=filelist, printmessage=True)
+ return result
+
+def hgclean(orig, repo, node, show_stats=True):
+ result = orig(repo, node, show_stats)
+ lfcommands.updatelfiles(repo.ui, repo)
+ return result
+
+def hgmerge(orig, repo, node, force=None, remind=True):
+ # Mark the repo as being in the middle of a merge, so that
+ # updatelfiles() will know that it needs to trust the standins in
+ # the working copy, not in the standins in the current node
+ repo._ismerging = True
+ try:
+ result = orig(repo, node, force, remind)
+ lfcommands.updatelfiles(repo.ui, repo)
+ finally:
+ repo._ismerging = False
+ return result
+
+# When we rebase a repository with remotely changed largefiles, we need to
+# take some extra care so that the largefiles are correctly updated in the
+# working copy
+def overridepull(orig, ui, repo, source=None, **opts):
+ revsprepull = len(repo)
+ if opts.get('rebase', False):
+ repo._isrebasing = True
+ try:
+ if opts.get('update'):
+ del opts['update']
+ ui.debug('--update and --rebase are not compatible, ignoring '
+ 'the update flag\n')
+ del opts['rebase']
+ cmdutil.bailifchanged(repo)
+ origpostincoming = commands.postincoming
+ def _dummy(*args, **kwargs):
+ pass
+ commands.postincoming = _dummy
+ repo.lfpullsource = source
+ if not source:
+ source = 'default'
+ try:
+ result = commands.pull(ui, repo, source, **opts)
+ finally:
+ commands.postincoming = origpostincoming
+ revspostpull = len(repo)
+ if revspostpull > revsprepull:
+ result = result or rebase.rebase(ui, repo)
+ finally:
+ repo._isrebasing = False
+ else:
+ repo.lfpullsource = source
+ if not source:
+ source = 'default'
+ oldheads = lfutil.getcurrentheads(repo)
+ result = orig(ui, repo, source, **opts)
+ # If we do not have the new largefiles for any new heads we pulled, we
+ # will run into a problem later if we try to merge or rebase with one of
+        # these heads, so cache the largefiles now directly into the system
+ # cache.
+ ui.status(_("caching new largefiles\n"))
+ numcached = 0
+ heads = lfutil.getcurrentheads(repo)
+ newheads = set(heads).difference(set(oldheads))
+ for head in newheads:
+ (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
+ numcached += len(cached)
+ ui.status(_("%d largefiles cached\n") % numcached)
+ if opts.get('all_largefiles'):
+ revspostpull = len(repo)
+ revs = []
+ for rev in xrange(revsprepull + 1, revspostpull):
+ revs.append(repo[rev].rev())
+ lfcommands.downloadlfiles(ui, repo, revs)
+ return result
+
+def overrideclone(orig, ui, source, dest=None, **opts):
+ if dest is None:
+ dest = hg.defaultdest(source)
+ if opts.get('all_largefiles') and not hg.islocal(dest):
+        raise util.Abort(_(
+            '--all-largefiles is incompatible with non-local destination %s')
+            % dest)
+ result = hg.clone(ui, opts, source, dest,
+ pull=opts.get('pull'),
+ stream=opts.get('uncompressed'),
+ rev=opts.get('rev'),
+ update=True, # required for successful walkchangerevs
+ branch=opts.get('branch'))
+ if result is None:
+ return True
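+    # hg.clone returns None on failure; by Mercurial convention a truthy
+    # return value from a command function signals failure.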
+ if opts.get('all_largefiles'):
+ sourcerepo, destrepo = result
+ success, missing = lfcommands.downloadlfiles(ui, destrepo.local(), None)
+ return missing != 0
+ return result is None
+
+def overriderebase(orig, ui, repo, **opts):
+ repo._isrebasing = True
+ try:
+ orig(ui, repo, **opts)
+ finally:
+ repo._isrebasing = False
+
+def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
+ prefix=None, mtime=None, subrepos=None):
+ # No need to lock because we are only reading history and
+ # largefile caches, neither of which are modified.
+ lfcommands.cachelfiles(repo.ui, repo, node)
+
+ if kind not in archival.archivers:
+ raise util.Abort(_("unknown archive type '%s'") % kind)
+
+ ctx = repo[node]
+
+ if kind == 'files':
+ if prefix:
+ raise util.Abort(
+ _('cannot give prefix when archiving to files'))
+ else:
+ prefix = archival.tidyprefix(dest, kind, prefix)
+
+ def write(name, mode, islink, getdata):
+ if matchfn and not matchfn(name):
+ return
+ data = getdata()
+ if decode:
+ data = repo.wwritedata(name, data)
+ archiver.addfile(prefix + name, mode, islink, data)
+
+ archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
+
+ if repo.ui.configbool("ui", "archivemeta", True):
+ def metadata():
+ base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
+ hex(repo.changelog.node(0)), hex(node), ctx.branch())
+
+ tags = ''.join('tag: %s\n' % t for t in ctx.tags()
+ if repo.tagtype(t) == 'global')
+ if not tags:
+ repo.ui.pushbuffer()
+ opts = {'template': '{latesttag}\n{latesttagdistance}',
+ 'style': '', 'patch': None, 'git': None}
+ cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
+ ltags, dist = repo.ui.popbuffer().split('\n')
+ tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
+ tags += 'latesttagdistance: %s\n' % dist
+
+ return base + tags
+
+ write('.hg_archival.txt', 0644, False, metadata)
+
+ for f in ctx:
+ ff = ctx.flags(f)
+ getdata = ctx[f].data
+ if lfutil.isstandin(f):
+ path = lfutil.findfile(repo, getdata().strip())
+ if path is None:
+ raise util.Abort(
+ _('largefile %s not found in repo store or system cache')
+ % lfutil.splitstandin(f))
+ f = lfutil.splitstandin(f)
+
+ def getdatafn():
+ fd = None
+ try:
+ fd = open(path, 'rb')
+ return fd.read()
+ finally:
+ if fd:
+ fd.close()
+
+ getdata = getdatafn
+ write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
+
+ if subrepos:
+ for subpath in ctx.substate:
+ sub = ctx.sub(subpath)
+ submatch = match_.narrowmatcher(subpath, matchfn)
+ sub.archive(repo.ui, archiver, prefix, submatch)
+
+ archiver.done()
+
+def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
+ rev = repo._state[1]
+ ctx = repo._repo[rev]
+
+ lfcommands.cachelfiles(ui, repo._repo, ctx.node())
+
+ def write(name, mode, islink, getdata):
+ # At this point, the standin has been replaced with the largefile name,
+ # so the normal matcher works here without the lfutil variants.
+ if match and not match(f):
+ return
+ data = getdata()
+
+ archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
+
+ for f in ctx:
+ ff = ctx.flags(f)
+ getdata = ctx[f].data
+ if lfutil.isstandin(f):
+ path = lfutil.findfile(repo._repo, getdata().strip())
+ if path is None:
+ raise util.Abort(
+ _('largefile %s not found in repo store or system cache')
+ % lfutil.splitstandin(f))
+ f = lfutil.splitstandin(f)
+
+ def getdatafn():
+ fd = None
+ try:
+ fd = open(os.path.join(prefix, path), 'rb')
+ return fd.read()
+ finally:
+ if fd:
+ fd.close()
+
+ getdata = getdatafn
+
+ write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
+
+ for subpath in ctx.substate:
+ sub = ctx.sub(subpath)
+ submatch = match_.narrowmatcher(subpath, match)
+ sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
+ submatch)
+
+# If a largefile is modified, the change is not reflected in its
+# standin until a commit. cmdutil.bailifchanged() raises an exception
+# if the repo has uncommitted changes. Wrap it to also check if
+# largefiles were changed. This is used by bisect and backout.
+def overridebailifchanged(orig, repo):
+ orig(repo)
+ repo.lfstatus = True
+ modified, added, removed, deleted = repo.status()[:4]
+ repo.lfstatus = False
+ if modified or added or removed or deleted:
+ raise util.Abort(_('outstanding uncommitted changes'))
+
+# Fetch doesn't use cmdutil.bailifchanged so override it to add the check
+def overridefetch(orig, ui, repo, *pats, **opts):
+ repo.lfstatus = True
+ modified, added, removed, deleted = repo.status()[:4]
+ repo.lfstatus = False
+ if modified or added or removed or deleted:
+ raise util.Abort(_('outstanding uncommitted changes'))
+ return orig(ui, repo, *pats, **opts)
+
+def overrideforget(orig, ui, repo, *pats, **opts):
+ installnormalfilesmatchfn(repo[None].manifest())
+ orig(ui, repo, *pats, **opts)
+ restorematchfn()
+ m = scmutil.match(repo[None], pats, opts)
+
+ try:
+ repo.lfstatus = True
+ s = repo.status(match=m, clean=True)
+ finally:
+ repo.lfstatus = False
+ forget = sorted(s[0] + s[1] + s[3] + s[6])
+ forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
+
+ for f in forget:
+ if lfutil.standin(f) not in repo.dirstate and not \
+ os.path.isdir(m.rel(lfutil.standin(f))):
+ ui.warn(_('not removing %s: file is already untracked\n')
+ % m.rel(f))
+
+ for f in forget:
+ if ui.verbose or not m.exact(f):
+ ui.status(_('removing %s\n') % m.rel(f))
+
+ # Need to lock because standin files are deleted then removed from the
+    # repository and we could race in between.
+ wlock = repo.wlock()
+ try:
+ lfdirstate = lfutil.openlfdirstate(ui, repo)
+ for f in forget:
+ if lfdirstate[f] == 'a':
+ lfdirstate.drop(f)
+ else:
+ lfdirstate.remove(f)
+ lfdirstate.write()
+ lfutil.reporemove(repo, [lfutil.standin(f) for f in forget],
+ unlink=True)
+ finally:
+ wlock.release()
+
+def getoutgoinglfiles(ui, repo, dest=None, **opts):
+ dest = ui.expandpath(dest or 'default-push', dest or 'default')
+ dest, branches = hg.parseurl(dest, opts.get('branch'))
+ revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
+ if revs:
+ revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
+
+ try:
+ remote = hg.peer(repo, opts, dest)
+ except error.RepoError:
+ return None
+ o = lfutil.findoutgoing(repo, remote, False)
+ if not o:
+ return None
+ o = repo.changelog.nodesbetween(o, revs)[0]
+ if opts.get('newest_first'):
+ o.reverse()
+
+ toupload = set()
+ for n in o:
+ parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
+ ctx = repo[n]
+ files = set(ctx.files())
+ if len(parents) == 2:
+ mc = ctx.manifest()
+ mp1 = ctx.parents()[0].manifest()
+ mp2 = ctx.parents()[1].manifest()
+ for f in mp1:
+ if f not in mc:
+ files.add(f)
+ for f in mp2:
+ if f not in mc:
+ files.add(f)
+ for f in mc:
+ if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
+ files.add(f)
+ toupload = toupload.union(
+ set([f for f in files if lfutil.isstandin(f) and f in ctx]))
+ return toupload
+
+def overrideoutgoing(orig, ui, repo, dest=None, **opts):
+ orig(ui, repo, dest, **opts)
+
+ if opts.pop('large', None):
+ toupload = getoutgoinglfiles(ui, repo, dest, **opts)
+ if toupload is None:
+ ui.status(_('largefiles: No remote repo\n'))
+ else:
+ ui.status(_('largefiles to upload:\n'))
+ for file in toupload:
+ ui.status(lfutil.splitstandin(file) + '\n')
+ ui.status('\n')
+
+def overridesummary(orig, ui, repo, *pats, **opts):
+ try:
+ repo.lfstatus = True
+ orig(ui, repo, *pats, **opts)
+ finally:
+ repo.lfstatus = False
+
+ if opts.pop('large', None):
+ toupload = getoutgoinglfiles(ui, repo, None, **opts)
+ if toupload is None:
+ ui.status(_('largefiles: No remote repo\n'))
+ else:
+ ui.status(_('largefiles: %d to upload\n') % len(toupload))
+
+def overrideaddremove(orig, ui, repo, *pats, **opts):
+ if not lfutil.islfilesrepo(repo):
+ return orig(ui, repo, *pats, **opts)
+ # Get the list of missing largefiles so we can remove them
+ lfdirstate = lfutil.openlfdirstate(ui, repo)
+ s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
+ False, False)
+ (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
+
+    # Call into the normal remove code, but leave the removal of the standins
+    # to the original addremove. Monkey patching here makes sure we don't
+    # remove the standins in the largefiles code, preventing a very confused
+    # state later.
+ if missing:
+ m = [repo.wjoin(f) for f in missing]
+ repo._isaddremove = True
+ removelargefiles(ui, repo, *m, **opts)
+ repo._isaddremove = False
+ # Call into the normal add code, and any files that *should* be added as
+ # largefiles will be
+ addlargefiles(ui, repo, *pats, **opts)
+ # Now that we've handled largefiles, hand off to the original addremove
+ # function to take care of the rest. Make sure it doesn't do anything with
+ # largefiles by installing a matcher that will ignore them.
+ installnormalfilesmatchfn(repo[None].manifest())
+ result = orig(ui, repo, *pats, **opts)
+ restorematchfn()
+ return result
+
+# Calling purge with --all will cause the largefiles to be deleted.
+# Override repo.status to prevent this from happening.
+def overridepurge(orig, ui, repo, *dirs, **opts):
+ oldstatus = repo.status
+ def overridestatus(node1='.', node2=None, match=None, ignored=False,
+ clean=False, unknown=False, listsubrepos=False):
+ r = oldstatus(node1, node2, match, ignored, clean, unknown,
+ listsubrepos)
+ lfdirstate = lfutil.openlfdirstate(ui, repo)
+ modified, added, removed, deleted, unknown, ignored, clean = r
+ unknown = [f for f in unknown if lfdirstate[f] == '?']
+ ignored = [f for f in ignored if lfdirstate[f] == '?']
+ return modified, added, removed, deleted, unknown, ignored, clean
+ repo.status = overridestatus
+ orig(ui, repo, *dirs, **opts)
+ repo.status = oldstatus
+
+def overriderollback(orig, ui, repo, **opts):
+ result = orig(ui, repo, **opts)
+ merge.update(repo, node=None, branchmerge=False, force=True,
+ partial=lfutil.isstandin)
+ wlock = repo.wlock()
+ try:
+ lfdirstate = lfutil.openlfdirstate(ui, repo)
+ lfiles = lfutil.listlfiles(repo)
+ oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
+ for file in lfiles:
+ if file in oldlfiles:
+ lfdirstate.normallookup(file)
+ else:
+ lfdirstate.add(file)
+ lfdirstate.write()
+ finally:
+ wlock.release()
+ return result
+
+def overridetransplant(orig, ui, repo, *revs, **opts):
+ try:
+ oldstandins = lfutil.getstandinsstate(repo)
+ repo._istransplanting = True
+ result = orig(ui, repo, *revs, **opts)
+ newstandins = lfutil.getstandinsstate(repo)
+ filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
+ lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
+ printmessage=True)
+ finally:
+ repo._istransplanting = False
+ return result
+
+def overridecat(orig, ui, repo, file1, *pats, **opts):
+ ctx = scmutil.revsingle(repo, opts.get('rev'))
+    if lfutil.standin(file1) not in ctx:
+ result = orig(ui, repo, file1, *pats, **opts)
+ return result
+ return lfcommands.catlfile(repo, file1, ctx.rev(), opts.get('output'))
diff --git a/hgext/largefiles/proto.py b/hgext/largefiles/proto.py
new file mode 100644
index 0000000..de89e32
--- /dev/null
+++ b/hgext/largefiles/proto.py
@@ -0,0 +1,173 @@
+# Copyright 2011 Fog Creek Software
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os
+import urllib2
+
+from mercurial import error, httppeer, util, wireproto
+from mercurial.wireproto import batchable, future
+from mercurial.i18n import _
+
+import lfutil
+
+LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
+ '\n\nPlease enable it in your Mercurial config '
+ 'file.\n')
+
+def putlfile(repo, proto, sha):
+ '''Put a largefile into a repository's local store and into the
+ user cache.'''
+ proto.redirect()
+
+ path = lfutil.storepath(repo, sha)
+ util.makedirs(os.path.dirname(path))
+ tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)
+
+ try:
+ try:
+ proto.getfile(tmpfp)
+ tmpfp._fp.seek(0)
+ if sha != lfutil.hexsha1(tmpfp._fp):
+ raise IOError(0, _('largefile contents do not match hash'))
+ tmpfp.close()
+ lfutil.linktousercache(repo, sha)
+ except IOError, e:
+ repo.ui.warn(_('largefiles: failed to put %s into store: %s') %
+ (sha, e.strerror))
+ return wireproto.pushres(1)
+ finally:
+ tmpfp.discard()
+
+ return wireproto.pushres(0)
+
+def getlfile(repo, proto, sha):
+ '''Retrieve a largefile from the repository-local cache or system
+ cache.'''
+ filename = lfutil.findfile(repo, sha)
+ if not filename:
+ raise util.Abort(_('requested largefile %s not present in cache') % sha)
+ f = open(filename, 'rb')
+ length = os.fstat(f.fileno())[6]
+
+ # Since we can't set an HTTP content-length header here, and
+ # Mercurial core provides no way to give the length of a streamres
+ # (and reading the entire file into RAM would be ill-advised), we
+ # just send the length on the first line of the response, like the
+ # ssh proto does for string responses.
+ def generator():
+ yield '%d\n' % length
+ for chunk in f:
+ yield chunk
+ return wireproto.streamres(generator())
+
+def statlfile(repo, proto, sha):
+ '''Return '2\n' if the largefile is missing, '1\n' if it has a
+ mismatched checksum, or '0\n' if it is in good condition'''
+ filename = lfutil.findfile(repo, sha)
+ if not filename:
+ return '2\n'
+ fd = None
+ try:
+ fd = open(filename, 'rb')
+ return lfutil.hexsha1(fd) == sha and '0\n' or '1\n'
+ finally:
+ if fd:
+ fd.close()
+
+def wirereposetup(ui, repo):
+ class lfileswirerepository(repo.__class__):
+ def putlfile(self, sha, fd):
+ # unfortunately, httprepository._callpush tries to convert its
+ # input file-like into a bundle before sending it, so we can't use
+ # it ...
+ if issubclass(self.__class__, httppeer.httppeer):
+ res = None
+ try:
+ res = self._call('putlfile', data=fd, sha=sha,
+ headers={'content-type':'application/mercurial-0.1'})
+ d, output = res.split('\n', 1)
+ for l in output.splitlines(True):
+ self.ui.warn(_('remote: '), l, '\n')
+ return int(d)
+ except (ValueError, urllib2.HTTPError):
+ self.ui.warn(_('unexpected putlfile response: %s') % res)
+ return 1
+ # ... but we can't use sshrepository._call because the data=
+ # argument won't get sent, and _callpush does exactly what we want
+ # in this case: send the data straight through
+ else:
+ try:
+ ret, output = self._callpush("putlfile", fd, sha=sha)
+ if ret == "":
+ raise error.ResponseError(_('putlfile failed:'),
+ output)
+ return int(ret)
+ except IOError:
+ return 1
+ except ValueError:
+ raise error.ResponseError(
+ _('putlfile failed (unexpected response):'), ret)
+
+ def getlfile(self, sha):
+ stream = self._callstream("getlfile", sha=sha)
+ length = stream.readline()
+ try:
+ length = int(length)
+ except ValueError:
+ self._abort(error.ResponseError(_("unexpected response:"),
+ length))
+ return (length, stream)
+
+ @batchable
+ def statlfile(self, sha):
+ f = future()
+ result = {'sha': sha}
+ yield result, f
+ try:
+ yield int(f.value)
+ except (ValueError, urllib2.HTTPError):
+                # If the server returns anything but an integer followed by a
+                # newline, it's not speaking our language; if we get
+ # an HTTP error, we can't be sure the largefile is present;
+ # either way, consider it missing.
+ yield 2
+
+ repo.__class__ = lfileswirerepository
+
+# advertise the largefiles=serve capability
+def capabilities(repo, proto):
+ return capabilitiesorig(repo, proto) + ' largefiles=serve'
+
+# duplicate what Mercurial's new out-of-band errors mechanism does, because
+# clients old and new alike handle it well
+def webprotorefuseclient(self, message):
+ self.req.header([('Content-Type', 'application/hg-error')])
+ return message
+
+def sshprotorefuseclient(self, message):
+ self.ui.write_err('%s\n-\n' % message)
+ self.fout.write('\n')
+ self.fout.flush()
+
+ return ''
+
+def heads(repo, proto):
+ if lfutil.islfilesrepo(repo):
+ return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
+ return wireproto.heads(repo, proto)
+
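+# Largefiles-aware clients ask for 'lheads' instead of 'heads': a plain
+# 'heads' request is answered with the out-of-band error above when the
+# repository uses largefiles.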
+def sshrepocallstream(self, cmd, **args):
+ if cmd == 'heads' and self.capable('largefiles'):
+ cmd = 'lheads'
+ if cmd == 'batch' and self.capable('largefiles'):
+ args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
+ return ssholdcallstream(self, cmd, **args)
+
+def httprepocallstream(self, cmd, **args):
+ if cmd == 'heads' and self.capable('largefiles'):
+ cmd = 'lheads'
+ if cmd == 'batch' and self.capable('largefiles'):
+ args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
+ return httpoldcallstream(self, cmd, **args)
diff --git a/hgext/largefiles/remotestore.py b/hgext/largefiles/remotestore.py
new file mode 100644
index 0000000..6c3d371
--- /dev/null
+++ b/hgext/largefiles/remotestore.py
@@ -0,0 +1,110 @@
+# Copyright 2010-2011 Fog Creek Software
+# Copyright 2010-2011 Unity Technologies
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''remote largefile store; the base class for wirestore'''
+
+import urllib2
+
+from mercurial import util
+from mercurial.i18n import _
+from mercurial.wireproto import remotebatch
+
+import lfutil
+import basestore
+
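+# Concrete subclasses are expected to supply the transport primitives
+# _put(hash, fd), _get(hash) and _stat(hashes); wirestore.py implements
+# them on top of the wire protocol commands defined in proto.py.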
+class remotestore(basestore.basestore):
+ '''a largefile store accessed over a network'''
+ def __init__(self, ui, repo, url):
+ super(remotestore, self).__init__(ui, repo, url)
+
+ def put(self, source, hash):
+ if self.sendfile(source, hash):
+ raise util.Abort(
+ _('remotestore: could not put %s to remote store %s')
+ % (source, self.url))
+ self.ui.debug(
+ _('remotestore: put %s to remote store %s') % (source, self.url))
+
+ def exists(self, hashes):
+ return self._verify(hashes)
+
+ def sendfile(self, filename, hash):
+ self.ui.debug('remotestore: sendfile(%s, %s)\n' % (filename, hash))
+ fd = None
+ try:
+ try:
+ fd = lfutil.httpsendfile(self.ui, filename)
+ except IOError, e:
+ raise util.Abort(
+ _('remotestore: could not open file %s: %s')
+ % (filename, str(e)))
+ return self._put(hash, fd)
+ finally:
+ if fd:
+ fd.close()
+
+ def _getfile(self, tmpfile, filename, hash):
+        # quit if the largefile is invalid or missing
+ stat = self._stat(hash)
+ if stat == 1:
+ raise util.Abort(_('remotestore: largefile %s is invalid') % hash)
+ elif stat == 2:
+ raise util.Abort(_('remotestore: largefile %s is missing') % hash)
+
+ try:
+ length, infile = self._get(hash)
+ except urllib2.HTTPError, e:
+ # 401s get converted to util.Aborts; everything else is fine being
+ # turned into a StoreError
+ raise basestore.StoreError(filename, hash, self.url, str(e))
+ except urllib2.URLError, e:
+ # This usually indicates a connection problem, so don't
+ # keep trying with the other files... they will probably
+ # all fail too.
+ raise util.Abort('%s: %s' % (self.url, e.reason))
+ except IOError, e:
+ raise basestore.StoreError(filename, hash, self.url, str(e))
+
+        # Mercurial does not close its SSH connections after writing a
+        # stream, so the reader must stop at the advertised length
+ if length is not None:
+ infile = lfutil.limitreader(infile, length)
+ return lfutil.copyandhash(lfutil.blockstream(infile), tmpfile)
+
+ def _verify(self, hashes):
+ return self._stat(hashes)
+
+ def _verifyfile(self, cctx, cset, contents, standin, verified):
+ filename = lfutil.splitstandin(standin)
+ if not filename:
+ return False
+ fctx = cctx[standin]
+ key = (filename, fctx.filenode())
+ if key in verified:
+ return False
+
+ verified.add(key)
+
+        # the standin's contents are the expected hash of the largefile
+        expecthash = fctx.data()[0:40]
+        stat = self._stat(expecthash)
+ if not stat:
+ return False
+ elif stat == 1:
+ self.ui.warn(
+ _('changeset %s: %s: contents differ\n')
+ % (cset, filename))
+ return True # failed
+ elif stat == 2:
+ self.ui.warn(
+ _('changeset %s: %s missing\n')
+ % (cset, filename))
+ return True # failed
+ else:
+ raise RuntimeError('verify failed: unexpected response from '
+ 'statlfile (%r)' % stat)
+
+ def batch(self):
+ '''Support for remote batching.'''
+ return remotebatch(self)
+
diff --git a/hgext/largefiles/reposetup.py b/hgext/largefiles/reposetup.py
new file mode 100644
index 0000000..04ab704
--- /dev/null
+++ b/hgext/largefiles/reposetup.py
@@ -0,0 +1,475 @@
+# Copyright 2009-2010 Gregory P. Ward
+# Copyright 2009-2010 Intelerad Medical Systems Incorporated
+# Copyright 2010-2011 Fog Creek Software
+# Copyright 2010-2011 Unity Technologies
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''setup for largefiles repositories: reposetup'''
+import copy
+import types
+import os
+
+from mercurial import context, error, manifest, match as match_, util
+from mercurial import node as node_
+from mercurial.i18n import _
+
+import lfcommands
+import proto
+import lfutil
+
+def reposetup(ui, repo):
+    # wire repositories should be given the new wireproto functions, but
+    # not the other largefiles modifications
+ if not repo.local():
+ return proto.wirereposetup(ui, repo)
+
+ for name in ('status', 'commitctx', 'commit', 'push'):
+ method = getattr(repo, name)
+ if (isinstance(method, types.FunctionType) and
+ method.func_name == 'wrap'):
+ ui.warn(_('largefiles: repo method %r appears to have already been'
+ ' wrapped by another extension: '
+ 'largefiles may behave incorrectly\n')
+ % name)
+
+ class lfilesrepo(repo.__class__):
+ lfstatus = False
+ def status_nolfiles(self, *args, **kwargs):
+ return super(lfilesrepo, self).status(*args, **kwargs)
+
+ # When lfstatus is set, return a context that gives the names
+ # of largefiles instead of their corresponding standins and
+ # identifies the largefiles as always binary, regardless of
+ # their actual contents.
+ def __getitem__(self, changeid):
+ ctx = super(lfilesrepo, self).__getitem__(changeid)
+ if self.lfstatus:
+ class lfilesmanifestdict(manifest.manifestdict):
+ def __contains__(self, filename):
+ if super(lfilesmanifestdict,
+ self).__contains__(filename):
+ return True
+ return super(lfilesmanifestdict,
+ self).__contains__(lfutil.standin(filename))
+ class lfilesctx(ctx.__class__):
+ def files(self):
+ filenames = super(lfilesctx, self).files()
+ return [lfutil.splitstandin(f) or f for f in filenames]
+ def manifest(self):
+ man1 = super(lfilesctx, self).manifest()
+ man1.__class__ = lfilesmanifestdict
+ return man1
+ def filectx(self, path, fileid=None, filelog=None):
+ try:
+ if filelog is not None:
+ result = super(lfilesctx, self).filectx(
+ path, fileid, filelog)
+ else:
+ result = super(lfilesctx, self).filectx(
+ path, fileid)
+ except error.LookupError:
+ # Adding a null character will cause Mercurial to
+ # identify this as a binary file.
+ if filelog is not None:
+ result = super(lfilesctx, self).filectx(
+ lfutil.standin(path), fileid, filelog)
+ else:
+ result = super(lfilesctx, self).filectx(
+ lfutil.standin(path), fileid)
+ olddata = result.data
+ result.data = lambda: olddata() + '\0'
+ return result
+ ctx.__class__ = lfilesctx
+ return ctx
+
+ # Figure out the status of big files and insert them into the
+ # appropriate list in the result. Also removes standin files
+ # from the listing. Revert to the original status if
+ # self.lfstatus is False.
+ def status(self, node1='.', node2=None, match=None, ignored=False,
+ clean=False, unknown=False, listsubrepos=False):
+ listignored, listclean, listunknown = ignored, clean, unknown
+ if not self.lfstatus:
+ return super(lfilesrepo, self).status(node1, node2, match,
+ listignored, listclean, listunknown, listsubrepos)
+ else:
+ # some calls in this function rely on the old version of status
+ self.lfstatus = False
+ if isinstance(node1, context.changectx):
+ ctx1 = node1
+ else:
+ ctx1 = repo[node1]
+ if isinstance(node2, context.changectx):
+ ctx2 = node2
+ else:
+ ctx2 = repo[node2]
+ working = ctx2.rev() is None
+ parentworking = working and ctx1 == self['.']
+
+ def inctx(file, ctx):
+ try:
+ if ctx.rev() is None:
+ return file in ctx.manifest()
+ ctx[file]
+ return True
+ except KeyError:
+ return False
+
+ if match is None:
+ match = match_.always(self.root, self.getcwd())
+
+ # First check if there were files specified on the
+ # command line. If there were, and none of them were
+ # largefiles, we should just bail here and let super
+ # handle it -- thus gaining a big performance boost.
+ lfdirstate = lfutil.openlfdirstate(ui, self)
+ if match.files() and not match.anypats():
+ for f in lfdirstate:
+ if match(f):
+ break
+ else:
+ return super(lfilesrepo, self).status(node1, node2,
+ match, listignored, listclean,
+ listunknown, listsubrepos)
+
+ # Create a copy of match that matches standins instead
+ # of largefiles.
+ def tostandins(files):
+ if not working:
+ return files
+ newfiles = []
+ dirstate = repo.dirstate
+ for f in files:
+ sf = lfutil.standin(f)
+ if sf in dirstate:
+ newfiles.append(sf)
+ elif sf in dirstate.dirs():
+ # Directory entries could be regular or
+ # standin, check both
+ newfiles.extend((f, sf))
+ else:
+ newfiles.append(f)
+ return newfiles
+
+ # Create a function that we can use to override what is
+ # normally the ignore matcher. We've already checked
+ # for ignored files on the first dirstate walk, and
+                # unnecessarily re-checking here causes a huge performance
+ # hit because lfdirstate only knows about largefiles
+ def _ignoreoverride(self):
+ return False
+
+ m = copy.copy(match)
+ m._files = tostandins(m._files)
+
+ # Get ignored files here even if we weren't asked for them; we
+ # must use the result here for filtering later
+ result = super(lfilesrepo, self).status(node1, node2, m,
+ True, clean, unknown, listsubrepos)
+ if working:
+ try:
+ # Any non-largefiles that were explicitly listed must be
+ # taken out or lfdirstate.status will report an error.
+ # The status of these files was already computed using
+ # super's status.
+ # Override lfdirstate's ignore matcher to not do
+ # anything
+ origignore = lfdirstate._ignore
+ lfdirstate._ignore = _ignoreoverride
+
+ def sfindirstate(f):
+ sf = lfutil.standin(f)
+ dirstate = repo.dirstate
+ return sf in dirstate or sf in dirstate.dirs()
+ match._files = [f for f in match._files
+ if sfindirstate(f)]
+ # Don't waste time getting the ignored and unknown
+ # files again; we already have them
+ s = lfdirstate.status(match, [], False,
+ listclean, False)
+ (unsure, modified, added, removed, missing, unknown,
+ ignored, clean) = s
+ # Replace the list of ignored and unknown files with
+                        # the previously calculated lists, and strip out the
+ # largefiles
+ lfiles = set(lfdirstate._map)
+ ignored = set(result[5]).difference(lfiles)
+ unknown = set(result[4]).difference(lfiles)
+ if parentworking:
+ for lfile in unsure:
+ standin = lfutil.standin(lfile)
+ if standin not in ctx1:
+ # from second parent
+ modified.append(lfile)
+ elif ctx1[standin].data().strip() \
+ != lfutil.hashfile(self.wjoin(lfile)):
+ modified.append(lfile)
+ else:
+ clean.append(lfile)
+ lfdirstate.normal(lfile)
+ else:
+ tocheck = unsure + modified + added + clean
+ modified, added, clean = [], [], []
+
+ for lfile in tocheck:
+ standin = lfutil.standin(lfile)
+ if inctx(standin, ctx1):
+ if ctx1[standin].data().strip() != \
+ lfutil.hashfile(self.wjoin(lfile)):
+ modified.append(lfile)
+ else:
+ clean.append(lfile)
+ else:
+ added.append(lfile)
+ finally:
+ # Replace the original ignore function
+ lfdirstate._ignore = origignore
+
+ for standin in ctx1.manifest():
+ if not lfutil.isstandin(standin):
+ continue
+ lfile = lfutil.splitstandin(standin)
+ if not match(lfile):
+ continue
+ if lfile not in lfdirstate:
+ removed.append(lfile)
+
+ # Filter result lists
+ result = list(result)
+
+ # Largefiles are not really removed when they're
+ # still in the normal dirstate. Likewise, normal
+                    # files are not really removed if they're still in
+ # lfdirstate. This happens in merges where files
+ # change type.
+ removed = [f for f in removed if f not in repo.dirstate]
+ result[2] = [f for f in result[2] if f not in lfdirstate]
+
+ # Unknown files
+ unknown = set(unknown).difference(ignored)
+ result[4] = [f for f in unknown
+ if (repo.dirstate[f] == '?' and
+ not lfutil.isstandin(f))]
+ # Ignored files were calculated earlier by the dirstate,
+ # and we already stripped out the largefiles from the list
+ result[5] = ignored
+ # combine normal files and largefiles
+ normals = [[fn for fn in filelist
+ if not lfutil.isstandin(fn)]
+ for filelist in result]
+ lfiles = (modified, added, removed, missing, [], [], clean)
+ result = [sorted(list1 + list2)
+ for (list1, list2) in zip(normals, lfiles)]
+ else:
+ def toname(f):
+ if lfutil.isstandin(f):
+ return lfutil.splitstandin(f)
+ return f
+ result = [[toname(f) for f in items] for items in result]
+
+ if not listunknown:
+ result[4] = []
+ if not listignored:
+ result[5] = []
+ if not listclean:
+ result[6] = []
+ self.lfstatus = True
+ return result
+
+ # As part of committing, copy all of the largefiles into the
+ # cache.
+ def commitctx(self, *args, **kwargs):
+ node = super(lfilesrepo, self).commitctx(*args, **kwargs)
+ lfutil.copyalltostore(self, node)
+ return node
+
+ # Before commit, largefile standins have not had their
+ # contents updated to reflect the hash of their largefile.
+ # Do that here.
+ def commit(self, text="", user=None, date=None, match=None,
+ force=False, editor=False, extra={}):
+ orig = super(lfilesrepo, self).commit
+
+ wlock = repo.wlock()
+ try:
+ # Case 0: Rebase or Transplant
+ # We have to take the time to pull down the new largefiles now.
+ # Otherwise, any largefiles that were modified in the
+ # destination changesets get overwritten, either by the rebase
+ # or in the first commit after the rebase or transplant.
+ # updatelfiles will update the dirstate to mark any pulled
+ # largefiles as modified
+ if getattr(repo, "_isrebasing", False) or \
+ getattr(repo, "_istransplanting", False):
+ lfcommands.updatelfiles(repo.ui, repo, filelist=None,
+ printmessage=False)
+ result = orig(text=text, user=user, date=date, match=match,
+ force=force, editor=editor, extra=extra)
+ return result
+ # Case 1: user calls commit with no specific files or
+ # include/exclude patterns: refresh and commit all files that
+ # are "dirty".
+ if ((match is None) or
+ (not match.anypats() and not match.files())):
+ # Spend a bit of time here to get a list of files we know
+ # are modified so we can compare only against those.
+ # It can cost a lot of time (several seconds)
+ # otherwise to update all standins if the largefiles are
+ # large.
+ lfdirstate = lfutil.openlfdirstate(ui, self)
+ dirtymatch = match_.always(repo.root, repo.getcwd())
+ s = lfdirstate.status(dirtymatch, [], False, False, False)
+ modifiedfiles = []
+ for i in s:
+ modifiedfiles.extend(i)
+ lfiles = lfutil.listlfiles(self)
+ # this only loops through largefiles that exist (not
+ # removed/renamed)
+ for lfile in lfiles:
+ if lfile in modifiedfiles:
+ if os.path.exists(
+ self.wjoin(lfutil.standin(lfile))):
+ # this handles the case where a rebase is being
+ # performed and the working copy is not updated
+ # yet.
+ if os.path.exists(self.wjoin(lfile)):
+ lfutil.updatestandin(self,
+ lfutil.standin(lfile))
+ lfdirstate.normal(lfile)
+
+ result = orig(text=text, user=user, date=date, match=match,
+ force=force, editor=editor, extra=extra)
+
+ if result is not None:
+ for lfile in lfdirstate:
+ if lfile in modifiedfiles:
+ if (not os.path.exists(repo.wjoin(
+ lfutil.standin(lfile)))) or \
+ (not os.path.exists(repo.wjoin(lfile))):
+ lfdirstate.drop(lfile)
+
+ # This needs to be after commit; otherwise precommit hooks
+ # get the wrong status
+ lfdirstate.write()
+ return result
+
+ for f in match.files():
+ if lfutil.isstandin(f):
+ raise util.Abort(
+ _('file "%s" is a largefile standin') % f,
+ hint=('commit the largefile itself instead'))
+
+ # Case 2: user calls commit with specified patterns: refresh
+ # any matching big files.
+ smatcher = lfutil.composestandinmatcher(self, match)
+ standins = lfutil.dirstatewalk(self.dirstate, smatcher)
+
+ # No matching big files: get out of the way and pass control to
+ # the usual commit() method.
+ if not standins:
+ return orig(text=text, user=user, date=date, match=match,
+ force=force, editor=editor, extra=extra)
+
+ # Refresh all matching big files. It's possible that the
+ # commit will end up failing, in which case the big files will
+ # stay refreshed. No harm done: the user modified them and
+ # asked to commit them, so sooner or later we're going to
+ # refresh the standins. Might as well leave them refreshed.
+ lfdirstate = lfutil.openlfdirstate(ui, self)
+ for standin in standins:
+ lfile = lfutil.splitstandin(standin)
+                    if lfdirstate[lfile] != 'r':
+ lfutil.updatestandin(self, standin)
+ lfdirstate.normal(lfile)
+ else:
+ lfdirstate.drop(lfile)
+
+ # Cook up a new matcher that only matches regular files or
+ # standins corresponding to the big files requested by the
+ # user. Have to modify _files to prevent commit() from
+ # complaining "not tracked" for big files.
+ lfiles = lfutil.listlfiles(repo)
+ match = copy.copy(match)
+ origmatchfn = match.matchfn
+
+ # Check both the list of largefiles and the list of
+ # standins because if a largefile was removed, it
+ # won't be in the list of largefiles at this point
+ match._files += sorted(standins)
+
+ actualfiles = []
+ for f in match._files:
+ fstandin = lfutil.standin(f)
+
+ # ignore known largefiles and standins
+ if f in lfiles or fstandin in standins:
+ continue
+
+ # append directory separator to avoid collisions
+ if not fstandin.endswith(os.sep):
+ fstandin += os.sep
+
+ actualfiles.append(f)
+ match._files = actualfiles
+
+ def matchfn(f):
+ if origmatchfn(f):
+ return f not in lfiles
+ else:
+ return f in standins
+
+ match.matchfn = matchfn
+ result = orig(text=text, user=user, date=date, match=match,
+ force=force, editor=editor, extra=extra)
+ # This needs to be after commit; otherwise precommit hooks
+ # get the wrong status
+ lfdirstate.write()
+ return result
+ finally:
+ wlock.release()
+
+ def push(self, remote, force=False, revs=None, newbranch=False):
+ o = lfutil.findoutgoing(repo, remote, force)
+ if o:
+ toupload = set()
+ o = repo.changelog.nodesbetween(o, revs)[0]
+ for n in o:
+ parents = [p for p in repo.changelog.parents(n)
+ if p != node_.nullid]
+ ctx = repo[n]
+ files = set(ctx.files())
+ if len(parents) == 2:
+ mc = ctx.manifest()
+ mp1 = ctx.parents()[0].manifest()
+ mp2 = ctx.parents()[1].manifest()
+ for f in mp1:
+ if f not in mc:
+ files.add(f)
+ for f in mp2:
+ if f not in mc:
+ files.add(f)
+ for f in mc:
+                        if (mc[f] != mp1.get(f, None) or
+                                mc[f] != mp2.get(f, None)):
+ files.add(f)
+
+ toupload = toupload.union(
+ set([ctx[f].data().strip()
+ for f in files
+ if lfutil.isstandin(f) and f in ctx]))
+ lfcommands.uploadlfiles(ui, self, remote, toupload)
+ return super(lfilesrepo, self).push(remote, force, revs,
+ newbranch)
+
+ repo.__class__ = lfilesrepo
+
+ def checkrequireslfiles(ui, repo, **kwargs):
+ if 'largefiles' not in repo.requirements and util.any(
+ lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
+ repo.requirements.add('largefiles')
+ repo._writerequirements()
+
+ ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
+ ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
diff --git a/hgext/largefiles/uisetup.py b/hgext/largefiles/uisetup.py
new file mode 100644
index 0000000..e50190b
--- /dev/null
+++ b/hgext/largefiles/uisetup.py
@@ -0,0 +1,167 @@
+# Copyright 2009-2010 Gregory P. Ward
+# Copyright 2009-2010 Intelerad Medical Systems Incorporated
+# Copyright 2010-2011 Fog Creek Software
+# Copyright 2010-2011 Unity Technologies
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''setup for largefiles extension: uisetup'''
+
+from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
+ httppeer, localrepo, merge, sshpeer, sshserver, wireproto
+from mercurial.i18n import _
+from mercurial.hgweb import hgweb_mod, protocol, webcommands
+from mercurial.subrepo import hgsubrepo
+
+import overrides
+import proto
+
+def uisetup(ui):
+ # Disable auto-status for some commands which assume that all
+ # files in the result are under Mercurial's control
+
+ entry = extensions.wrapcommand(commands.table, 'add',
+ overrides.overrideadd)
+ addopt = [('', 'large', None, _('add as largefile')),
+ ('', 'normal', None, _('add as normal file')),
+ ('', 'lfsize', '', _('add all files above this size '
+ '(in megabytes) as largefiles '
+ '(default: 10)'))]
+ entry[1].extend(addopt)
+
+ entry = extensions.wrapcommand(commands.table, 'addremove',
+ overrides.overrideaddremove)
+ entry = extensions.wrapcommand(commands.table, 'remove',
+ overrides.overrideremove)
+ entry = extensions.wrapcommand(commands.table, 'forget',
+ overrides.overrideforget)
+
+ # Subrepos call status function
+ entry = extensions.wrapcommand(commands.table, 'status',
+ overrides.overridestatus)
+ entry = extensions.wrapfunction(hgsubrepo, 'status',
+ overrides.overridestatusfn)
+
+ entry = extensions.wrapcommand(commands.table, 'log',
+ overrides.overridelog)
+ entry = extensions.wrapcommand(commands.table, 'rollback',
+ overrides.overriderollback)
+ entry = extensions.wrapcommand(commands.table, 'verify',
+ overrides.overrideverify)
+
+ verifyopt = [('', 'large', None, _('verify largefiles')),
+ ('', 'lfa', None,
+ _('verify all revisions of largefiles not just current')),
+ ('', 'lfc', None,
+ _('verify largefile contents not just existence'))]
+ entry[1].extend(verifyopt)
+
+ entry = extensions.wrapcommand(commands.table, 'outgoing',
+ overrides.overrideoutgoing)
+ outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
+ entry[1].extend(outgoingopt)
+ entry = extensions.wrapcommand(commands.table, 'summary',
+ overrides.overridesummary)
+ summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
+ entry[1].extend(summaryopt)
+
+ entry = extensions.wrapcommand(commands.table, 'update',
+ overrides.overrideupdate)
+ entry = extensions.wrapcommand(commands.table, 'pull',
+ overrides.overridepull)
+ pullopt = [('', 'all-largefiles', None,
+ _('download all pulled versions of largefiles'))]
+ entry[1].extend(pullopt)
+ entry = extensions.wrapcommand(commands.table, 'clone',
+ overrides.overrideclone)
+ cloneopt = [('', 'all-largefiles', None,
+ _('download all versions of all largefiles'))]
+
+ entry[1].extend(cloneopt)
+ entry = extensions.wrapcommand(commands.table, 'cat',
+ overrides.overridecat)
+ entry = extensions.wrapfunction(merge, '_checkunknownfile',
+ overrides.overridecheckunknownfile)
+ entry = extensions.wrapfunction(merge, 'manifestmerge',
+ overrides.overridemanifestmerge)
+ entry = extensions.wrapfunction(filemerge, 'filemerge',
+ overrides.overridefilemerge)
+ entry = extensions.wrapfunction(cmdutil, 'copy',
+ overrides.overridecopy)
+
+ # Summary calls dirty on the subrepos
+ entry = extensions.wrapfunction(hgsubrepo, 'dirty',
+ overrides.overridedirty)
+
+ # Backout calls revert so we need to override both the command and the
+ # function
+ entry = extensions.wrapcommand(commands.table, 'revert',
+ overrides.overriderevert)
+ entry = extensions.wrapfunction(commands, 'revert',
+ overrides.overriderevert)
+
+ # clone uses hg._update instead of hg.update even though they are the
+    # same function... so wrap both of them
+ extensions.wrapfunction(hg, 'update', overrides.hgupdate)
+ extensions.wrapfunction(hg, '_update', overrides.hgupdate)
+ extensions.wrapfunction(hg, 'clean', overrides.hgclean)
+ extensions.wrapfunction(hg, 'merge', overrides.hgmerge)
+
+ extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
+ extensions.wrapfunction(hgsubrepo, 'archive', overrides.hgsubrepoarchive)
+ extensions.wrapfunction(cmdutil, 'bailifchanged',
+ overrides.overridebailifchanged)
+
+ # create the new wireproto commands ...
+ wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
+ wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
+ wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
+
+ # ... and wrap some existing ones
+ wireproto.commands['capabilities'] = (proto.capabilities, '')
+ wireproto.commands['heads'] = (proto.heads, '')
+ wireproto.commands['lheads'] = (wireproto.heads, '')
+
+ # make putlfile behave the same as push and {get,stat}lfile behave
+ # the same as pull w.r.t. permissions checks
+ hgweb_mod.perms['putlfile'] = 'push'
+ hgweb_mod.perms['getlfile'] = 'pull'
+ hgweb_mod.perms['statlfile'] = 'pull'
+
+ extensions.wrapfunction(webcommands, 'decodepath', overrides.decodepath)
+
+ # the hello wireproto command uses wireproto.capabilities, so it won't see
+ # our largefiles capability unless we replace the actual function as well.
+ proto.capabilitiesorig = wireproto.capabilities
+ wireproto.capabilities = proto.capabilities
+
+ # these let us reject non-largefiles clients and make them display
+ # our error messages
+ protocol.webproto.refuseclient = proto.webprotorefuseclient
+ sshserver.sshserver.refuseclient = proto.sshprotorefuseclient
+
+ # can't do this in reposetup because it needs to have happened before
+ # wirerepo.__init__ is called
+ proto.ssholdcallstream = sshpeer.sshpeer._callstream
+ proto.httpoldcallstream = httppeer.httppeer._callstream
+ sshpeer.sshpeer._callstream = proto.sshrepocallstream
+ httppeer.httppeer._callstream = proto.httprepocallstream
+
+ # don't die on seeing a repo with the largefiles requirement
+ localrepo.localrepository.supported |= set(['largefiles'])
+
+ # override some extensions' stuff as well
+ for name, module in extensions.extensions():
+ if name == 'fetch':
+ extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
+ overrides.overridefetch)
+ if name == 'purge':
+ extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
+ overrides.overridepurge)
+ if name == 'rebase':
+ extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
+ overrides.overriderebase)
+ if name == 'transplant':
+ extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
+ overrides.overridetransplant)
diff --git a/hgext/largefiles/wirestore.py b/hgext/largefiles/wirestore.py
new file mode 100644
index 0000000..a394cf0
--- /dev/null
+++ b/hgext/largefiles/wirestore.py
@@ -0,0 +1,37 @@
+# Copyright 2010-2011 Fog Creek Software
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''largefile store working over Mercurial's wire protocol'''
+
+import lfutil
+import remotestore
+
+class wirestore(remotestore.remotestore):
+ def __init__(self, ui, repo, remote):
+ cap = remote.capable('largefiles')
+ if not cap:
+ raise lfutil.storeprotonotcapable([])
+ storetypes = cap.split(',')
+ if 'serve' not in storetypes:
+ raise lfutil.storeprotonotcapable(storetypes)
+ self.remote = remote
+ super(wirestore, self).__init__(ui, repo, remote.url())
+
+ def _put(self, hash, fd):
+ return self.remote.putlfile(hash, fd)
+
+ def _get(self, hash):
+ return self.remote.getlfile(hash)
+
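+    # Issue all statlfile queries in a single wire round-trip; each hash
+    # maps to True when the server reported 0 ('exists and is well-formed').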
+ def _stat(self, hashes):
+ batch = self.remote.batch()
+ futures = {}
+ for hash in hashes:
+ futures[hash] = batch.statlfile(hash)
+ batch.submit()
+ retval = {}
+ for hash in hashes:
+ retval[hash] = not futures[hash].value
+ return retval
diff --git a/hgext/mq.py b/hgext/mq.py
new file mode 100644
index 0000000..33a31c4
--- /dev/null
+++ b/hgext/mq.py
@@ -0,0 +1,3597 @@
+# mq.py - patch queues for mercurial
+#
+# Copyright 2005, 2006 Chris Mason <mason@suse.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''manage a stack of patches
+
+This extension lets you work with a stack of patches in a Mercurial
+repository. It manages two stacks of patches - all known patches, and
+applied patches (subset of known patches).
+
+Known patches are represented as patch files in the .hg/patches
+directory. Applied patches are both patch files and changesets.
+
+Common tasks (use :hg:`help command` for more details)::
+
+ create new patch qnew
+ import existing patch qimport
+
+ print patch series qseries
+ print applied patches qapplied
+
+ add known patch to applied stack qpush
+ remove patch from applied stack qpop
+ refresh contents of top applied patch qrefresh
+
+By default, mq will automatically use git patches when required to
+avoid losing file mode changes, copy records, binary files, or empty
+file creations or deletions. This behaviour can be configured with::
+
+ [mq]
+ git = auto/keep/yes/no
+
+If set to 'keep', mq will obey the [diff] section configuration while
+preserving existing git patches upon qrefresh. If set to 'yes' or
+'no', mq will override the [diff] section and always generate git or
+regular patches, possibly losing data in the second case.
+
+It may be desirable for mq changesets to be kept in the secret phase (see
+:hg:`help phases`), which can be enabled with the following setting::
+
+ [mq]
+ secret = True
+
+You will by default be managing a patch queue named "patches". You can
+create other, independent patch queues with the :hg:`qqueue` command.
+
+If the working directory contains uncommitted files, qpush, qpop and
+qgoto abort immediately. If -f/--force is used, the changes are
+discarded. Setting::
+
+ [mq]
+ keepchanges = True
+
+makes them behave as if --keep-changes were passed, and non-conflicting
+local changes will be tolerated and preserved. If incompatible options
+such as -f/--force or --exact are passed, this setting is ignored.
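+
+A typical session (patch and file names are hypothetical) might be::
+
+  hg qnew -m "fix typo" fix-typo.patch
+  (edit some files)
+  hg qrefresh
+  hg qpop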
+'''
+
+from mercurial.i18n import _
+from mercurial.node import bin, hex, short, nullid, nullrev
+from mercurial.lock import release
+from mercurial import commands, cmdutil, hg, scmutil, util, revset
+from mercurial import repair, extensions, url, error, phases
+from mercurial import patch as patchmod
+import os, re, errno, shutil
+
+commands.norepo += " qclone"
+
+seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
+
+cmdtable = {}
+command = cmdutil.command(cmdtable)
+testedwith = 'internal'
+
+# Patch names look like unix file names.
+# They must be joinable with the queue directory and result in the patch path.
+normname = util.normpath
+
+class statusentry(object):
+ def __init__(self, node, name):
+ self.node, self.name = node, name
+ def __repr__(self):
+ return hex(self.node) + ':' + self.name
+
+class patchheader(object):
+ def __init__(self, pf, plainmode=False):
+ def eatdiff(lines):
+ while lines:
+ l = lines[-1]
+ if (l.startswith("diff -") or
+ l.startswith("Index:") or
+ l.startswith("===========")):
+ del lines[-1]
+ else:
+ break
+ def eatempty(lines):
+ while lines:
+ if not lines[-1].strip():
+ del lines[-1]
+ else:
+ break
+
+ message = []
+ comments = []
+ user = None
+ date = None
+ parent = None
+ format = None
+ subject = None
+ branch = None
+ nodeid = None
+ diffstart = 0
+
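+        # For reference, an 'hg export'-style header recognized by this
+        # loop looks like (values are illustrative):
+        #   # HG changeset patch
+        #   # User Jane Doe <jane@example.com>
+        #   # Date 1345634991 -3600
+        #   # Branch default
+        #   # Node ID <40 hex digits>
+        #   # Parent <40 hex digits>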
+ for line in file(pf):
+ line = line.rstrip()
+ if (line.startswith('diff --git')
+ or (diffstart and line.startswith('+++ '))):
+ diffstart = 2
+ break
+ diffstart = 0 # reset
+ if line.startswith("--- "):
+ diffstart = 1
+ continue
+ elif format == "hgpatch":
+ # parse values when importing the result of an hg export
+ if line.startswith("# User "):
+ user = line[7:]
+ elif line.startswith("# Date "):
+ date = line[7:]
+ elif line.startswith("# Parent "):
+ parent = line[9:].lstrip()
+ elif line.startswith("# Branch "):
+ branch = line[9:]
+ elif line.startswith("# Node ID "):
+ nodeid = line[10:]
+ elif not line.startswith("# ") and line:
+ message.append(line)
+ format = None
+ elif line == '# HG changeset patch':
+ message = []
+ format = "hgpatch"
+ elif (format != "tagdone" and (line.startswith("Subject: ") or
+ line.startswith("subject: "))):
+ subject = line[9:]
+ format = "tag"
+ elif (format != "tagdone" and (line.startswith("From: ") or
+ line.startswith("from: "))):
+ user = line[6:]
+ format = "tag"
+ elif (format != "tagdone" and (line.startswith("Date: ") or
+ line.startswith("date: "))):
+ date = line[6:]
+ format = "tag"
+ elif format == "tag" and line == "":
+ # when looking for tags (subject: from: etc) they
+ # end once you find a blank line in the source
+ format = "tagdone"
+ elif message or line:
+ message.append(line)
+ comments.append(line)
+
+ eatdiff(message)
+ eatdiff(comments)
+ # Remember the exact starting line of the patch diffs before consuming
+ # empty lines, for external use by TortoiseHg and others
+ self.diffstartline = len(comments)
+ eatempty(message)
+ eatempty(comments)
+
+ # make sure message isn't empty
+ if format and format.startswith("tag") and subject:
+ message.insert(0, "")
+ message.insert(0, subject)
+
+ self.message = message
+ self.comments = comments
+ self.user = user
+ self.date = date
+ self.parent = parent
+ # nodeid and branch are for external use by TortoiseHg and others
+ self.nodeid = nodeid
+ self.branch = branch
+ self.haspatch = diffstart > 1
+ self.plainmode = plainmode
+
+ def setuser(self, user):
+ if not self.updateheader(['From: ', '# User '], user):
+ try:
+ patchheaderat = self.comments.index('# HG changeset patch')
+ self.comments.insert(patchheaderat + 1, '# User ' + user)
+ except ValueError:
+ if self.plainmode or self._hasheader(['Date: ']):
+ self.comments = ['From: ' + user] + self.comments
+ else:
+ tmp = ['# HG changeset patch', '# User ' + user, '']
+ self.comments = tmp + self.comments
+ self.user = user
+
+ def setdate(self, date):
+ if not self.updateheader(['Date: ', '# Date '], date):
+ try:
+ patchheaderat = self.comments.index('# HG changeset patch')
+ self.comments.insert(patchheaderat + 1, '# Date ' + date)
+ except ValueError:
+ if self.plainmode or self._hasheader(['From: ']):
+ self.comments = ['Date: ' + date] + self.comments
+ else:
+ tmp = ['# HG changeset patch', '# Date ' + date, '']
+ self.comments = tmp + self.comments
+ self.date = date
+
+ def setparent(self, parent):
+ if not self.updateheader(['# Parent '], parent):
+ try:
+ patchheaderat = self.comments.index('# HG changeset patch')
+ self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
+ except ValueError:
+ pass
+ self.parent = parent
+
+ def setmessage(self, message):
+ if self.comments:
+ self._delmsg()
+ self.message = [message]
+ self.comments += self.message
+
+ def updateheader(self, prefixes, new):
+ '''Update all references to a field in the patch header.
+ Return whether the field is present.'''
+ res = False
+ for prefix in prefixes:
+ for i in xrange(len(self.comments)):
+ if self.comments[i].startswith(prefix):
+ self.comments[i] = prefix + new
+ res = True
+ break
+ return res
+
+ def _hasheader(self, prefixes):
+ '''Check if a header starts with any of the given prefixes.'''
+ for prefix in prefixes:
+ for comment in self.comments:
+ if comment.startswith(prefix):
+ return True
+ return False
+
+ def __str__(self):
+ if not self.comments:
+ return ''
+ return '\n'.join(self.comments) + '\n\n'
+
+ def _delmsg(self):
+ '''Remove existing message, keeping the rest of the comments fields.
+ If comments contains 'subject: ', message will prepend
+ the field and a blank line.'''
+ if self.message:
+ subj = 'subject: ' + self.message[0].lower()
+ for i in xrange(len(self.comments)):
+ if subj == self.comments[i].lower():
+ del self.comments[i]
+ self.message = self.message[2:]
+ break
+ ci = 0
+ for mi in self.message:
+ while mi != self.comments[ci]:
+ ci += 1
+ del self.comments[ci]
+
+def newcommit(repo, phase, *args, **kwargs):
+ """helper dedicated to ensure a commit respect mq.secret setting
+
+ It should be used instead of repo.commit inside the mq source for operation
+ creating new changeset.
+ """
+ if phase is None:
+ if repo.ui.configbool('mq', 'secret', False):
+ phase = phases.secret
+ if phase is not None:
+ backup = repo.ui.backupconfig('phases', 'new-commit')
+ # Marking the repository as committing an mq patch can be used
+ # to optimize operations like _branchtags().
+ repo._committingpatch = True
+ try:
+ if phase is not None:
+ repo.ui.setconfig('phases', 'new-commit', phase)
+ return repo.commit(*args, **kwargs)
+ finally:
+ repo._committingpatch = False
+ if phase is not None:
+ repo.ui.restoreconfig(backup)
+
+class AbortNoCleanup(error.Abort):
+ pass
+
+class queue(object):
+ def __init__(self, ui, path, patchdir=None):
+ self.basepath = path
+ try:
+ fh = open(os.path.join(path, 'patches.queue'))
+ cur = fh.read().rstrip()
+ fh.close()
+ if not cur:
+ curpath = os.path.join(path, 'patches')
+ else:
+ curpath = os.path.join(path, 'patches-' + cur)
+ except IOError:
+ curpath = os.path.join(path, 'patches')
+ self.path = patchdir or curpath
+ self.opener = scmutil.opener(self.path)
+ self.ui = ui
+ self.applieddirty = False
+ self.seriesdirty = False
+ self.added = []
+ self.seriespath = "series"
+ self.statuspath = "status"
+ self.guardspath = "guards"
+ self.activeguards = None
+ self.guardsdirty = False
+ # Handle mq.git as a bool with extended values
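+        # (configbool returns None when the option is unset and raises
+        # ConfigError for non-boolean values such as 'auto' or 'keep'; in
+        # both cases we fall back to reading the raw string below.)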
+ try:
+ gitmode = ui.configbool('mq', 'git', None)
+ if gitmode is None:
+ raise error.ConfigError
+ self.gitmode = gitmode and 'yes' or 'no'
+ except error.ConfigError:
+ self.gitmode = ui.config('mq', 'git', 'auto').lower()
+ self.plainmode = ui.configbool('mq', 'plain', False)
+
+ @util.propertycache
+ def applied(self):
+ def parselines(lines):
+ for l in lines:
+ entry = l.split(':', 1)
+ if len(entry) > 1:
+ n, name = entry
+ yield statusentry(bin(n), name)
+ elif l.strip():
+                    self.ui.warn(_('malformed mq status line: %s\n') % entry)
+ # else we ignore empty lines
+ try:
+ lines = self.opener.read(self.statuspath).splitlines()
+ return list(parselines(lines))
+ except IOError, e:
+ if e.errno == errno.ENOENT:
+ return []
+ raise
+
+ @util.propertycache
+ def fullseries(self):
+ try:
+ return self.opener.read(self.seriespath).splitlines()
+ except IOError, e:
+ if e.errno == errno.ENOENT:
+ return []
+ raise
+
+ @util.propertycache
+ def series(self):
+ self.parseseries()
+ return self.series
+
+ @util.propertycache
+ def seriesguards(self):
+ self.parseseries()
+ return self.seriesguards
+
+ def invalidate(self):
+ for a in 'applied fullseries series seriesguards'.split():
+ if a in self.__dict__:
+ delattr(self, a)
+ self.applieddirty = False
+ self.seriesdirty = False
+ self.guardsdirty = False
+ self.activeguards = None
+
+ def diffopts(self, opts={}, patchfn=None):
+ diffopts = patchmod.diffopts(self.ui, opts)
+ if self.gitmode == 'auto':
+ diffopts.upgrade = True
+ elif self.gitmode == 'keep':
+ pass
+ elif self.gitmode in ('yes', 'no'):
+ diffopts.git = self.gitmode == 'yes'
+ else:
+            raise util.Abort(_('mq.git option can be auto/keep/yes/no,'
+                               ' got %s') % self.gitmode)
+ if patchfn:
+ diffopts = self.patchopts(diffopts, patchfn)
+ return diffopts
+
+ def patchopts(self, diffopts, *patches):
+ """Return a copy of input diff options with git set to true if
+ referenced patch is a git patch and should be preserved as such.
+ """
+ diffopts = diffopts.copy()
+ if not diffopts.git and self.gitmode == 'keep':
+ for patchfn in patches:
+ patchf = self.opener(patchfn, 'r')
+ # if the patch was a git patch, refresh it as a git patch
+ for line in patchf:
+ if line.startswith('diff --git'):
+ diffopts.git = True
+ break
+ patchf.close()
+ return diffopts
+
+ def join(self, *p):
+ return os.path.join(self.path, *p)
+
+ def findseries(self, patch):
+ def matchpatch(l):
+ l = l.split('#', 1)[0]
+ return l.strip() == patch
+ for index, l in enumerate(self.fullseries):
+ if matchpatch(l):
+ return index
+ return None
+
+ guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
+
+ def parseseries(self):
+ self.series = []
+ self.seriesguards = []
+ for l in self.fullseries:
+ h = l.find('#')
+ if h == -1:
+ patch = l
+ comment = ''
+ elif h == 0:
+ continue
+ else:
+ patch = l[:h]
+ comment = l[h:]
+ patch = patch.strip()
+ if patch:
+ if patch in self.series:
+ raise util.Abort(_('%s appears more than once in %s') %
+ (patch, self.join(self.seriespath)))
+ self.series.append(patch)
+ self.seriesguards.append(self.guard_re.findall(comment))
+
+ def checkguard(self, guard):
+ if not guard:
+ return _('guard cannot be an empty string')
+ bad_chars = '# \t\r\n\f'
+ first = guard[0]
+ if first in '-+':
+ return (_('guard %r starts with invalid character: %r') %
+ (guard, first))
+ for c in bad_chars:
+ if c in guard:
+ return _('invalid character in guard %r: %r') % (guard, c)
+
+ def setactive(self, guards):
+ for guard in guards:
+ bad = self.checkguard(guard)
+ if bad:
+ raise util.Abort(bad)
+ guards = sorted(set(guards))
+ self.ui.debug('active guards: %s\n' % ' '.join(guards))
+ self.activeguards = guards
+ self.guardsdirty = True
+
+ def active(self):
+ if self.activeguards is None:
+ self.activeguards = []
+ try:
+ guards = self.opener.read(self.guardspath).split()
+ except IOError, err:
+ if err.errno != errno.ENOENT:
+ raise
+ guards = []
+ for i, guard in enumerate(guards):
+ bad = self.checkguard(guard)
+ if bad:
+ self.ui.warn('%s:%d: %s\n' %
+ (self.join(self.guardspath), i + 1, bad))
+ else:
+ self.activeguards.append(guard)
+ return self.activeguards
+
+ def setguards(self, idx, guards):
+ for g in guards:
+ if len(g) < 2:
+ raise util.Abort(_('guard %r too short') % g)
+ if g[0] not in '-+':
+ raise util.Abort(_('guard %r starts with invalid char') % g)
+ bad = self.checkguard(g[1:])
+ if bad:
+ raise util.Abort(bad)
+ drop = self.guard_re.sub('', self.fullseries[idx])
+ self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
+ self.parseseries()
+ self.seriesdirty = True
+
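+    # Guard semantics: a patch guarded by '-foo' is skipped while guard
+    # 'foo' is active; a patch carrying any '+' guards is pushable only if
+    # at least one of them is active. For example, after 'hg qselect
+    # stable', a (hypothetical) series line 'fix.patch #+stable' is
+    # pushable and 'fix.patch #-stable' is not.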
+ def pushable(self, idx):
+ if isinstance(idx, str):
+ idx = self.series.index(idx)
+ patchguards = self.seriesguards[idx]
+ if not patchguards:
+ return True, None
+ guards = self.active()
+ exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
+ if exactneg:
+ return False, repr(exactneg[0])
+ pos = [g for g in patchguards if g[0] == '+']
+ exactpos = [g for g in pos if g[1:] in guards]
+ if pos:
+ if exactpos:
+ return True, repr(exactpos[0])
+ return False, ' '.join(map(repr, pos))
+ return True, ''
+
+ def explainpushable(self, idx, all_patches=False):
+ write = all_patches and self.ui.write or self.ui.warn
+ if all_patches or self.ui.verbose:
+ if isinstance(idx, str):
+ idx = self.series.index(idx)
+ pushable, why = self.pushable(idx)
+ if all_patches and pushable:
+ if why is None:
+ write(_('allowing %s - no guards in effect\n') %
+ self.series[idx])
+ else:
+ if not why:
+ write(_('allowing %s - no matching negative guards\n') %
+ self.series[idx])
+ else:
+ write(_('allowing %s - guarded by %s\n') %
+ (self.series[idx], why))
+ if not pushable:
+ if why:
+ write(_('skipping %s - guarded by %s\n') %
+ (self.series[idx], why))
+ else:
+ write(_('skipping %s - no matching guards\n') %
+ self.series[idx])
+
+ def savedirty(self):
+ def writelist(items, path):
+ fp = self.opener(path, 'w')
+ for i in items:
+ fp.write("%s\n" % i)
+ fp.close()
+ if self.applieddirty:
+ writelist(map(str, self.applied), self.statuspath)
+ self.applieddirty = False
+ if self.seriesdirty:
+ writelist(self.fullseries, self.seriespath)
+ self.seriesdirty = False
+ if self.guardsdirty:
+ writelist(self.activeguards, self.guardspath)
+ self.guardsdirty = False
+ if self.added:
+ qrepo = self.qrepo()
+ if qrepo:
+ qrepo[None].add(f for f in self.added if f not in qrepo[None])
+ self.added = []
+
+ def removeundo(self, repo):
+ undo = repo.sjoin('undo')
+ if not os.path.exists(undo):
+ return
+ try:
+ os.unlink(undo)
+ except OSError, inst:
+ self.ui.warn(_('error removing undo: %s\n') % str(inst))
+
+ def backup(self, repo, files, copy=False):
+ # backup local changes in --force case
+ for f in sorted(files):
+ absf = repo.wjoin(f)
+ if os.path.lexists(absf):
+ self.ui.note(_('saving current version of %s as %s\n') %
+ (f, f + '.orig'))
+ if copy:
+ util.copyfile(absf, absf + '.orig')
+ else:
+ util.rename(absf, absf + '.orig')
+
+ def printdiff(self, repo, diffopts, node1, node2=None, files=None,
+ fp=None, changes=None, opts={}):
+ stat = opts.get('stat')
+ m = scmutil.match(repo[node1], files, opts)
+ cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
+ changes, stat, fp)
+
+ def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
+ # first try just applying the patch
+ (err, n) = self.apply(repo, [patch], update_status=False,
+ strict=True, merge=rev)
+
+ if err == 0:
+ return (err, n)
+
+ if n is None:
+ raise util.Abort(_("apply failed for patch %s") % patch)
+
+ self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
+
+ # apply failed, strip away that rev and merge.
+ hg.clean(repo, head)
+ self.strip(repo, [n], update=False, backup='strip')
+
+ ctx = repo[rev]
+ ret = hg.merge(repo, rev)
+ if ret:
+ raise util.Abort(_("update returned %d") % ret)
+ n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
+ if n is None:
+ raise util.Abort(_("repo commit failed"))
+ try:
+ ph = patchheader(mergeq.join(patch), self.plainmode)
+ except Exception:
+ raise util.Abort(_("unable to read %s") % patch)
+
+ diffopts = self.patchopts(diffopts, patch)
+ patchf = self.opener(patch, "w")
+ comments = str(ph)
+ if comments:
+ patchf.write(comments)
+ self.printdiff(repo, diffopts, head, n, fp=patchf)
+ patchf.close()
+ self.removeundo(repo)
+ return (0, n)
+
+ def qparents(self, repo, rev=None):
+ if rev is None:
+ (p1, p2) = repo.dirstate.parents()
+ if p2 == nullid:
+ return p1
+ if not self.applied:
+ return None
+ return self.applied[-1].node
+ p1, p2 = repo.changelog.parents(rev)
+ if p2 != nullid and p2 in [x.node for x in self.applied]:
+ return p2
+ return p1
+
+ def mergepatch(self, repo, mergeq, series, diffopts):
+ if not self.applied:
+ # each of the patches merged in will have two parents. This
+ # can confuse the qrefresh, qdiff, and strip code because it
+ # needs to know which parent is actually in the patch queue.
+            # So, we insert a merge marker with only one parent. This way
+            # the first patch in the queue is never a merge patch.
+ #
+ pname = ".hg.patches.merge.marker"
+ n = newcommit(repo, None, '[mq]: merge marker', force=True)
+ self.removeundo(repo)
+ self.applied.append(statusentry(n, pname))
+ self.applieddirty = True
+
+ head = self.qparents(repo)
+
+ for patch in series:
+ patch = mergeq.lookup(patch, strict=True)
+ if not patch:
+ self.ui.warn(_("patch %s does not exist\n") % patch)
+ return (1, None)
+ pushable, reason = self.pushable(patch)
+ if not pushable:
+ self.explainpushable(patch, all_patches=True)
+ continue
+ info = mergeq.isapplied(patch)
+ if not info:
+ self.ui.warn(_("patch %s is not applied\n") % patch)
+ return (1, None)
+ rev = info[1]
+ err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
+ if head:
+ self.applied.append(statusentry(head, patch))
+ self.applieddirty = True
+ if err:
+ return (err, head)
+ self.savedirty()
+ return (0, head)
+
+ def patch(self, repo, patchfile):
+ '''Apply patchfile to the working directory.
+ patchfile: name of patch file'''
+ files = set()
+ try:
+ fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1,
+ files=files, eolmode=None)
+ return (True, list(files), fuzz)
+ except Exception, inst:
+ self.ui.note(str(inst) + '\n')
+ if not self.ui.verbose:
+ self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
+ self.ui.traceback()
+ return (False, list(files), False)
+
+ def apply(self, repo, series, list=False, update_status=True,
+ strict=False, patchdir=None, merge=None, all_files=None,
+ tobackup=None, keepchanges=False):
+ wlock = lock = tr = None
+ try:
+ wlock = repo.wlock()
+ lock = repo.lock()
+ tr = repo.transaction("qpush")
+ try:
+ ret = self._apply(repo, series, list, update_status,
+ strict, patchdir, merge, all_files=all_files,
+ tobackup=tobackup, keepchanges=keepchanges)
+ tr.close()
+ self.savedirty()
+ return ret
+ except AbortNoCleanup:
+ tr.close()
+ self.savedirty()
+ return 2, repo.dirstate.p1()
+ except: # re-raises
+ try:
+ tr.abort()
+ finally:
+ repo.invalidate()
+ repo.dirstate.invalidate()
+ self.invalidate()
+ raise
+ finally:
+ release(tr, lock, wlock)
+ self.removeundo(repo)
+
+ def _apply(self, repo, series, list=False, update_status=True,
+ strict=False, patchdir=None, merge=None, all_files=None,
+ tobackup=None, keepchanges=False):
+ """returns (error, hash)
+
+ error = 1 for unable to read, 2 for patch failed, 3 for patch
+ fuzz. tobackup is None or a set of files to backup before they
+ are modified by a patch.
+ """
+ # TODO unify with commands.py
+ if not patchdir:
+ patchdir = self.path
+ err = 0
+ n = None
+ for patchname in series:
+ pushable, reason = self.pushable(patchname)
+ if not pushable:
+ self.explainpushable(patchname, all_patches=True)
+ continue
+ self.ui.status(_("applying %s\n") % patchname)
+ pf = os.path.join(patchdir, patchname)
+
+ try:
+ ph = patchheader(self.join(patchname), self.plainmode)
+ except IOError:
+ self.ui.warn(_("unable to read %s\n") % patchname)
+ err = 1
+ break
+
+ message = ph.message
+ if not message:
+ # The commit message should not be translated
+ message = "imported patch %s\n" % patchname
+ else:
+ if list:
+ # The commit message should not be translated
+ message.append("\nimported patch %s" % patchname)
+ message = '\n'.join(message)
+
+ if ph.haspatch:
+ if tobackup:
+ touched = patchmod.changedfiles(self.ui, repo, pf)
+ touched = set(touched) & tobackup
+ if touched and keepchanges:
+ raise AbortNoCleanup(
+ _("local changes found, refresh first"))
+ self.backup(repo, touched, copy=True)
+ tobackup = tobackup - touched
+ (patcherr, files, fuzz) = self.patch(repo, pf)
+ if all_files is not None:
+ all_files.update(files)
+ patcherr = not patcherr
+ else:
+ self.ui.warn(_("patch %s is empty\n") % patchname)
+ patcherr, files, fuzz = 0, [], 0
+
+ if merge and files:
+ # Mark as removed/merged and update dirstate parent info
+ removed = []
+ merged = []
+ for f in files:
+ if os.path.lexists(repo.wjoin(f)):
+ merged.append(f)
+ else:
+ removed.append(f)
+ for f in removed:
+ repo.dirstate.remove(f)
+ for f in merged:
+ repo.dirstate.merge(f)
+ p1, p2 = repo.dirstate.parents()
+ repo.setparents(p1, merge)
+
+ match = scmutil.matchfiles(repo, files or [])
+ oldtip = repo['tip']
+ n = newcommit(repo, None, message, ph.user, ph.date, match=match,
+ force=True)
+ if repo['tip'] == oldtip:
+ raise util.Abort(_("qpush exactly duplicates child changeset"))
+ if n is None:
+ raise util.Abort(_("repository commit failed"))
+
+ if update_status:
+ self.applied.append(statusentry(n, patchname))
+
+ if patcherr:
+ self.ui.warn(_("patch failed, rejects left in working dir\n"))
+ err = 2
+ break
+
+ if fuzz and strict:
+ self.ui.warn(_("fuzz found when applying patch, stopping\n"))
+ err = 3
+ break
+ return (err, n)
+
+ def _cleanup(self, patches, numrevs, keep=False):
+ if not keep:
+ r = self.qrepo()
+ if r:
+ r[None].forget(patches)
+ for p in patches:
+ os.unlink(self.join(p))
+
+ qfinished = []
+ if numrevs:
+ qfinished = self.applied[:numrevs]
+ del self.applied[:numrevs]
+ self.applieddirty = True
+
+ unknown = []
+
+ for (i, p) in sorted([(self.findseries(p), p) for p in patches],
+ reverse=True):
+ if i is not None:
+ del self.fullseries[i]
+ else:
+ unknown.append(p)
+
+ if unknown:
+ if numrevs:
+ rev = dict((entry.name, entry.node) for entry in qfinished)
+ for p in unknown:
+ msg = _('revision %s refers to unknown patches: %s\n')
+ self.ui.warn(msg % (short(rev[p]), p))
+ else:
+ msg = _('unknown patches: %s\n')
+ raise util.Abort(''.join(msg % p for p in unknown))
+
+ self.parseseries()
+ self.seriesdirty = True
+ return [entry.node for entry in qfinished]
+
+ def _revpatches(self, repo, revs):
+ firstrev = repo[self.applied[0].node].rev()
+ patches = []
+ for i, rev in enumerate(revs):
+
+ if rev < firstrev:
+ raise util.Abort(_('revision %d is not managed') % rev)
+
+ ctx = repo[rev]
+ base = self.applied[i].node
+ if ctx.node() != base:
+ msg = _('cannot delete revision %d above applied patches')
+ raise util.Abort(msg % rev)
+
+ patch = self.applied[i].name
+ for fmt in ('[mq]: %s', 'imported patch %s'):
+ if ctx.description() == fmt % patch:
+ msg = _('patch %s finalized without changeset message\n')
+ repo.ui.status(msg % patch)
+ break
+
+ patches.append(patch)
+ return patches
+
+ def finish(self, repo, revs):
+ # Manually trigger phase computation to ensure phasedefaults is
+ # executed before we remove the patches.
+ repo._phasecache
+ patches = self._revpatches(repo, sorted(revs))
+ qfinished = self._cleanup(patches, len(patches))
+ if qfinished and repo.ui.configbool('mq', 'secret', False):
+ # only use this logic when the secret option is added
+ oldqbase = repo[qfinished[0]]
+ tphase = repo.ui.config('phases', 'new-commit', phases.draft)
+ if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
+ phases.advanceboundary(repo, tphase, qfinished)
+
+ def delete(self, repo, patches, opts):
+ if not patches and not opts.get('rev'):
+ raise util.Abort(_('qdelete requires at least one revision or '
+ 'patch name'))
+
+ realpatches = []
+ for patch in patches:
+ patch = self.lookup(patch, strict=True)
+ info = self.isapplied(patch)
+ if info:
+ raise util.Abort(_("cannot delete applied patch %s") % patch)
+ if patch not in self.series:
+ raise util.Abort(_("patch %s not in series file") % patch)
+ if patch not in realpatches:
+ realpatches.append(patch)
+
+ numrevs = 0
+ if opts.get('rev'):
+ if not self.applied:
+ raise util.Abort(_('no patches applied'))
+ revs = scmutil.revrange(repo, opts.get('rev'))
+ if len(revs) > 1 and revs[0] > revs[1]:
+ revs.reverse()
+ revpatches = self._revpatches(repo, revs)
+ realpatches += revpatches
+ numrevs = len(revpatches)
+
+ self._cleanup(realpatches, numrevs, opts.get('keep'))
+
+ def checktoppatch(self, repo):
+ if self.applied:
+ top = self.applied[-1].node
+ patch = self.applied[-1].name
+ pp = repo.dirstate.parents()
+ if top not in pp:
+ raise util.Abort(_("working directory revision is not qtip"))
+ return top, patch
+ return None, None
+
+ def checksubstate(self, repo, baserev=None):
+ '''return list of subrepos at a different revision than substate.
+ Abort if any subrepos have uncommitted changes.'''
+ inclsubs = []
+ wctx = repo[None]
+ if baserev:
+ bctx = repo[baserev]
+ else:
+ bctx = wctx.parents()[0]
+ for s in wctx.substate:
+ if wctx.sub(s).dirty(True):
+ raise util.Abort(
+ _("uncommitted changes in subrepository %s") % s)
+ elif s not in bctx.substate or bctx.sub(s).dirty():
+ inclsubs.append(s)
+ return inclsubs
+
+ def putsubstate2changes(self, substatestate, changes):
+ for files in changes[:3]:
+ if '.hgsubstate' in files:
+ return # already listed up
+ # not yet listed up
+ if substatestate in 'a?':
+ changes[1].append('.hgsubstate')
+ elif substatestate in 'r':
+ changes[2].append('.hgsubstate')
+ else: # modified
+ changes[0].append('.hgsubstate')
+
+ def localchangesfound(self, refresh=True):
+ if refresh:
+ raise util.Abort(_("local changes found, refresh first"))
+ else:
+ raise util.Abort(_("local changes found"))
+
+ def checklocalchanges(self, repo, force=False, refresh=True):
+ m, a, r, d = repo.status()[:4]
+ if (m or a or r or d) and not force:
+ self.localchangesfound(refresh)
+ return m, a, r, d
+
+ _reserved = ('series', 'status', 'guards', '.', '..')
+ def checkreservedname(self, name):
+ if name in self._reserved:
+ raise util.Abort(_('"%s" cannot be used as the name of a patch')
+ % name)
+ for prefix in ('.hg', '.mq'):
+ if name.startswith(prefix):
+ raise util.Abort(_('patch name cannot begin with "%s"')
+ % prefix)
+ for c in ('#', ':'):
+ if c in name:
+ raise util.Abort(_('"%s" cannot be used in the name of a patch')
+ % c)
+
+ def checkpatchname(self, name, force=False):
+ self.checkreservedname(name)
+ if not force and os.path.exists(self.join(name)):
+ if os.path.isdir(self.join(name)):
+ raise util.Abort(_('"%s" already exists as a directory')
+ % name)
+ else:
+ raise util.Abort(_('patch "%s" already exists') % name)
+
+ def checkkeepchanges(self, keepchanges, force):
+ if force and keepchanges:
+ raise util.Abort(_('cannot use both --force and --keep-changes'))
+
+ def new(self, repo, patchfn, *pats, **opts):
+ """options:
+ msg: a string or a no-argument function returning a string
+ """
+ msg = opts.get('msg')
+ user = opts.get('user')
+ date = opts.get('date')
+ if date:
+ date = util.parsedate(date)
+ diffopts = self.diffopts({'git': opts.get('git')})
+ if opts.get('checkname', True):
+ self.checkpatchname(patchfn)
+ inclsubs = self.checksubstate(repo)
+ if inclsubs:
+ inclsubs.append('.hgsubstate')
+ substatestate = repo.dirstate['.hgsubstate']
+ if opts.get('include') or opts.get('exclude') or pats:
+ if inclsubs:
+ pats = list(pats or []) + inclsubs
+ match = scmutil.match(repo[None], pats, opts)
+ # detect missing files in pats
+ def badfn(f, msg):
+ if f != '.hgsubstate': # .hgsubstate is auto-created
+ raise util.Abort('%s: %s' % (f, msg))
+ match.bad = badfn
+ changes = repo.status(match=match)
+ m, a, r, d = changes[:4]
+ else:
+ changes = self.checklocalchanges(repo, force=True)
+ m, a, r, d = changes
+ match = scmutil.matchfiles(repo, m + a + r + inclsubs)
+ if len(repo[None].parents()) > 1:
+ raise util.Abort(_('cannot manage merge changesets'))
+ commitfiles = m + a + r
+ self.checktoppatch(repo)
+ insert = self.fullseriesend()
+ wlock = repo.wlock()
+ try:
+ try:
+ # if patch file write fails, abort early
+ p = self.opener(patchfn, "w")
+ except IOError, e:
+ raise util.Abort(_('cannot write patch "%s": %s')
+ % (patchfn, e.strerror))
+ try:
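+ # (sketch) plainmode writes mail-style headers ("From: user",
+ # "Date: 1342682843 0"), while the default mode writes
+ # "# HG changeset patch" comment headers with # Parent,
+ # # User and # Date lines, as below.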
+ if self.plainmode:
+ if user:
+ p.write("From: " + user + "\n")
+ if not date:
+ p.write("\n")
+ if date:
+ p.write("Date: %d %d\n\n" % date)
+ else:
+ p.write("# HG changeset patch\n")
+ p.write("# Parent "
+ + hex(repo[None].p1().node()) + "\n")
+ if user:
+ p.write("# User " + user + "\n")
+ if date:
+ p.write("# Date %s %s\n\n" % date)
+ if util.safehasattr(msg, '__call__'):
+ msg = msg()
+ commitmsg = msg and msg or ("[mq]: %s" % patchfn)
+ n = newcommit(repo, None, commitmsg, user, date, match=match,
+ force=True)
+ if n is None:
+ raise util.Abort(_("repo commit failed"))
+ try:
+ self.fullseries[insert:insert] = [patchfn]
+ self.applied.append(statusentry(n, patchfn))
+ self.parseseries()
+ self.seriesdirty = True
+ self.applieddirty = True
+ if msg:
+ msg = msg + "\n\n"
+ p.write(msg)
+ if commitfiles:
+ parent = self.qparents(repo, n)
+ if inclsubs:
+ self.putsubstate2changes(substatestate, changes)
+ chunks = patchmod.diff(repo, node1=parent, node2=n,
+ changes=changes, opts=diffopts)
+ for chunk in chunks:
+ p.write(chunk)
+ p.close()
+ r = self.qrepo()
+ if r:
+ r[None].add([patchfn])
+ except: # re-raises
+ repo.rollback()
+ raise
+ except Exception:
+ patchpath = self.join(patchfn)
+ try:
+ os.unlink(patchpath)
+ except OSError:
+ self.ui.warn(_('error unlinking %s\n') % patchpath)
+ raise
+ self.removeundo(repo)
+ finally:
+ release(wlock)
+
+ def strip(self, repo, revs, update=True, backup="all", force=None):
+ wlock = lock = None
+ try:
+ wlock = repo.wlock()
+ lock = repo.lock()
+
+ if update:
+ self.checklocalchanges(repo, force=force, refresh=False)
+ urev = self.qparents(repo, revs[0])
+ hg.clean(repo, urev)
+ repo.dirstate.write()
+
+ repair.strip(self.ui, repo, revs, backup)
+ finally:
+ release(lock, wlock)
+
+ def isapplied(self, patch):
+ """returns (index, rev, patch)"""
+ for i, a in enumerate(self.applied):
+ if a.name == patch:
+ return (i, a.node, a.name)
+ return None
+
+ # If the exact patch name does not exist, we try a few
+ # variations. If strict is passed, we try only #1:
+ #
+ # 1) a number (as string) to indicate an offset in the series file
+ # 2) a unique substring of the patch name
+ # 3) patchname[-+]num to indicate an offset in the series file
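+ #
+ # For example (sketch), with series ['a.patch', 'b.patch', 'c.patch']:
+ #   lookup('1')         -> 'b.patch'  (offset in series)
+ #   lookup('b.pa')      -> 'b.patch'  (unique substring)
+ #   lookup('c.patch-1') -> 'b.patch'  (name minus offset)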
+ def lookup(self, patch, strict=False):
+ def partialname(s):
+ if s in self.series:
+ return s
+ matches = [x for x in self.series if s in x]
+ if len(matches) > 1:
+ self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
+ for m in matches:
+ self.ui.warn(' %s\n' % m)
+ return None
+ if matches:
+ return matches[0]
+ if self.series and self.applied:
+ if s == 'qtip':
+ return self.series[self.seriesend(True)-1]
+ if s == 'qbase':
+ return self.series[0]
+ return None
+
+ if patch in self.series:
+ return patch
+
+ if not os.path.isfile(self.join(patch)):
+ try:
+ sno = int(patch)
+ except (ValueError, OverflowError):
+ pass
+ else:
+ if -len(self.series) <= sno < len(self.series):
+ return self.series[sno]
+
+ if not strict:
+ res = partialname(patch)
+ if res:
+ return res
+ minus = patch.rfind('-')
+ if minus >= 0:
+ res = partialname(patch[:minus])
+ if res:
+ i = self.series.index(res)
+ try:
+ off = int(patch[minus + 1:] or 1)
+ except (ValueError, OverflowError):
+ pass
+ else:
+ if i - off >= 0:
+ return self.series[i - off]
+ plus = patch.rfind('+')
+ if plus >= 0:
+ res = partialname(patch[:plus])
+ if res:
+ i = self.series.index(res)
+ try:
+ off = int(patch[plus + 1:] or 1)
+ except (ValueError, OverflowError):
+ pass
+ else:
+ if i + off < len(self.series):
+ return self.series[i + off]
+ raise util.Abort(_("patch %s not in series") % patch)
+
+ def push(self, repo, patch=None, force=False, list=False, mergeq=None,
+ all=False, move=False, exact=False, nobackup=False,
+ keepchanges=False):
+ self.checkkeepchanges(keepchanges, force)
+ diffopts = self.diffopts()
+ wlock = repo.wlock()
+ try:
+ heads = []
+ for b, ls in repo.branchmap().iteritems():
+ heads += ls
+ if not heads:
+ heads = [nullid]
+ if repo.dirstate.p1() not in heads and not exact:
+ self.ui.status(_("(working directory not at a head)\n"))
+
+ if not self.series:
+ self.ui.warn(_('no patches in series\n'))
+ return 0
+
+ # Suppose our series file is: A B C and the current 'top'
+ # patch is B. qpush C should be performed (moving forward);
+ # qpush B is a no-op (no change); qpush A is an error (can't
+ # go backwards with qpush).
+ if patch:
+ patch = self.lookup(patch)
+ info = self.isapplied(patch)
+ if info and info[0] >= len(self.applied) - 1:
+ self.ui.warn(
+ _('qpush: %s is already at the top\n') % patch)
+ return 0
+
+ pushable, reason = self.pushable(patch)
+ if pushable:
+ if self.series.index(patch) < self.seriesend():
+ raise util.Abort(
+ _("cannot push to a previous patch: %s") % patch)
+ else:
+ if reason:
+ reason = _('guarded by %s') % reason
+ else:
+ reason = _('no matching guards')
+ self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
+ return 1
+ elif all:
+ patch = self.series[-1]
+ if self.isapplied(patch):
+ self.ui.warn(_('all patches are currently applied\n'))
+ return 0
+
+ # Following the above example, starting at 'top' of B:
+ # qpush should be performed (pushes C), but a subsequent
+ # qpush without an argument is an error (nothing to
+ # apply). This allows a loop of "...while hg qpush..." to
+ # work, as it detects an error when done.
+ start = self.seriesend()
+ if start == len(self.series):
+ self.ui.warn(_('patch series already fully applied\n'))
+ return 1
+ if not force and not keepchanges:
+ self.checklocalchanges(repo, refresh=self.applied)
+
+ if exact:
+ if keepchanges:
+ raise util.Abort(
+ _("cannot use --exact and --keep-changes together"))
+ if move:
+ raise util.Abort(_('cannot use --exact and --move '
+ 'together'))
+ if self.applied:
+ raise util.Abort(_('cannot push --exact with applied '
+ 'patches'))
+ root = self.series[start]
+ target = patchheader(self.join(root), self.plainmode).parent
+ if not target:
+ raise util.Abort(
+ _("%s does not have a parent recorded") % root)
+ if not repo[target] == repo['.']:
+ hg.update(repo, target)
+
+ if move:
+ if not patch:
+ raise util.Abort(_("please specify the patch to move"))
+ for fullstart, rpn in enumerate(self.fullseries):
+ # strip markers for patch guards
+ if self.guard_re.split(rpn, 1)[0] == self.series[start]:
+ break
+ for i, rpn in enumerate(self.fullseries[fullstart:]):
+ # strip markers for patch guards
+ if self.guard_re.split(rpn, 1)[0] == patch:
+ break
+ index = fullstart + i
+ assert index < len(self.fullseries)
+ fullpatch = self.fullseries[index]
+ del self.fullseries[index]
+ self.fullseries.insert(fullstart, fullpatch)
+ self.parseseries()
+ self.seriesdirty = True
+
+ self.applieddirty = True
+ if start > 0:
+ self.checktoppatch(repo)
+ if not patch:
+ patch = self.series[start]
+ end = start + 1
+ else:
+ end = self.series.index(patch, start) + 1
+
+ tobackup = set()
+ if (not nobackup and force) or keepchanges:
+ m, a, r, d = self.checklocalchanges(repo, force=True)
+ if keepchanges:
+ tobackup.update(m + a + r + d)
+ else:
+ tobackup.update(m + a)
+
+ s = self.series[start:end]
+ all_files = set()
+ try:
+ if mergeq:
+ ret = self.mergepatch(repo, mergeq, s, diffopts)
+ else:
+ ret = self.apply(repo, s, list, all_files=all_files,
+ tobackup=tobackup, keepchanges=keepchanges)
+ except: # re-raises
+ self.ui.warn(_('cleaning up working directory...'))
+ node = repo.dirstate.p1()
+ hg.revert(repo, node, None)
+ # only remove unknown files that we know we touched or
+ # created while patching
+ for f in all_files:
+ if f not in repo.dirstate:
+ try:
+ util.unlinkpath(repo.wjoin(f))
+ except OSError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ self.ui.warn(_('done\n'))
+ raise
+
+ if not self.applied:
+ return ret[0]
+ top = self.applied[-1].name
+ if ret[0] and ret[0] > 1:
+ msg = _("errors during apply, please fix and refresh %s\n")
+ self.ui.write(msg % top)
+ else:
+ self.ui.write(_("now at: %s\n") % top)
+ return ret[0]
+
+ finally:
+ wlock.release()
+
+ def pop(self, repo, patch=None, force=False, update=True, all=False,
+ nobackup=False, keepchanges=False):
+ self.checkkeepchanges(keepchanges, force)
+ wlock = repo.wlock()
+ try:
+ if patch:
+ # index, rev, patch
+ info = self.isapplied(patch)
+ if not info:
+ patch = self.lookup(patch)
+ info = self.isapplied(patch)
+ if not info:
+ raise util.Abort(_("patch %s is not applied") % patch)
+
+ if not self.applied:
+ # Allow qpop -a to work repeatedly,
+ # but not qpop without an argument
+ self.ui.warn(_("no patches applied\n"))
+ return not all
+
+ if all:
+ start = 0
+ elif patch:
+ start = info[0] + 1
+ else:
+ start = len(self.applied) - 1
+
+ if start >= len(self.applied):
+ self.ui.warn(_("qpop: %s is already at the top\n") % patch)
+ return
+
+ if not update:
+ parents = repo.dirstate.parents()
+ rr = [x.node for x in self.applied]
+ for p in parents:
+ if p in rr:
+ self.ui.warn(_("qpop: forcing dirstate update\n"))
+ update = True
+ else:
+ parents = [p.node() for p in repo[None].parents()]
+ needupdate = False
+ for entry in self.applied[start:]:
+ if entry.node in parents:
+ needupdate = True
+ break
+ update = needupdate
+
+ tobackup = set()
+ if update:
+ m, a, r, d = self.checklocalchanges(
+ repo, force=force or keepchanges)
+ if force:
+ if not nobackup:
+ tobackup.update(m + a)
+ elif keepchanges:
+ tobackup.update(m + a + r + d)
+
+ self.applieddirty = True
+ end = len(self.applied)
+ rev = self.applied[start].node
+ if update:
+ top = self.checktoppatch(repo)[0]
+
+ try:
+ heads = repo.changelog.heads(rev)
+ except error.LookupError:
+ node = short(rev)
+ raise util.Abort(_('trying to pop unknown node %s') % node)
+
+ if heads != [self.applied[-1].node]:
+ raise util.Abort(_("popping would remove a revision not "
+ "managed by this patch queue"))
+ if not repo[self.applied[-1].node].mutable():
+ raise util.Abort(
+ _("popping would remove an immutable revision"),
+ hint=_('see "hg help phases" for details'))
+
+ # we know there are no local changes, so we can use a
+ # simplified form of hg.update.
+ if update:
+ qp = self.qparents(repo, rev)
+ ctx = repo[qp]
+ m, a, r, d = repo.status(qp, top)[:4]
+ if d:
+ raise util.Abort(_("deletions found between repo revs"))
+
+ tobackup = set(a + m + r) & tobackup
+ if keepchanges and tobackup:
+ self.localchangesfound()
+ self.backup(repo, tobackup)
+
+ for f in a:
+ try:
+ util.unlinkpath(repo.wjoin(f))
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ repo.dirstate.drop(f)
+ for f in m + r:
+ fctx = ctx[f]
+ repo.wwrite(f, fctx.data(), fctx.flags())
+ repo.dirstate.normal(f)
+ repo.setparents(qp, nullid)
+ for patch in reversed(self.applied[start:end]):
+ self.ui.status(_("popping %s\n") % patch.name)
+ del self.applied[start:end]
+ self.strip(repo, [rev], update=False, backup='strip')
+ if self.applied:
+ self.ui.write(_("now at: %s\n") % self.applied[-1].name)
+ else:
+ self.ui.write(_("patch queue now empty\n"))
+ finally:
+ wlock.release()
+
+ def diff(self, repo, pats, opts):
+ top, patch = self.checktoppatch(repo)
+ if not top:
+ self.ui.write(_("no patches applied\n"))
+ return
+ qp = self.qparents(repo, top)
+ if opts.get('reverse'):
+ node1, node2 = None, qp
+ else:
+ node1, node2 = qp, None
+ diffopts = self.diffopts(opts, patch)
+ self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
+
+ def refresh(self, repo, pats=None, **opts):
+ if not self.applied:
+ self.ui.write(_("no patches applied\n"))
+ return 1
+ msg = opts.get('msg', '').rstrip()
+ newuser = opts.get('user')
+ newdate = opts.get('date')
+ if newdate:
+ newdate = '%d %d' % util.parsedate(newdate)
+ wlock = repo.wlock()
+
+ try:
+ self.checktoppatch(repo)
+ (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
+ if repo.changelog.heads(top) != [top]:
+ raise util.Abort(_("cannot refresh a revision with children"))
+ if not repo[top].mutable():
+ raise util.Abort(_("cannot refresh immutable revision"),
+ hint=_('see "hg help phases" for details'))
+
+ cparents = repo.changelog.parents(top)
+ patchparent = self.qparents(repo, top)
+
+ inclsubs = self.checksubstate(repo, hex(patchparent))
+ if inclsubs:
+ inclsubs.append('.hgsubstate')
+ substatestate = repo.dirstate['.hgsubstate']
+
+ ph = patchheader(self.join(patchfn), self.plainmode)
+ diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
+ if msg:
+ ph.setmessage(msg)
+ if newuser:
+ ph.setuser(newuser)
+ if newdate:
+ ph.setdate(newdate)
+ ph.setparent(hex(patchparent))
+
+ # only commit new patch when write is complete
+ patchf = self.opener(patchfn, 'w', atomictemp=True)
+
+ comments = str(ph)
+ if comments:
+ patchf.write(comments)
+
+ # update the dirstate in place, strip off the qtip commit
+ # and then commit.
+ #
+ # this should really read:
+ # mm, dd, aa = repo.status(top, patchparent)[:3]
+ # but we do it backwards to take advantage of manifest/chlog
+ # caching against the next repo.status call
+ mm, aa, dd = repo.status(patchparent, top)[:3]
+ changes = repo.changelog.read(top)
+ man = repo.manifest.read(changes[0])
+ aaa = aa[:]
+ matchfn = scmutil.match(repo[None], pats, opts)
+ # in short mode, we only diff the files included in the
+ # patch already plus specified files
+ if opts.get('short'):
+ # if amending a patch, we start with existing
+ # files plus specified files - unfiltered
+ match = scmutil.matchfiles(repo, mm + aa + dd + matchfn.files())
+ # filter with include/exclude options
+ matchfn = scmutil.match(repo[None], opts=opts)
+ else:
+ match = scmutil.matchall(repo)
+ m, a, r, d = repo.status(match=match)[:4]
+ mm = set(mm)
+ aa = set(aa)
+ dd = set(dd)
+
+ # we might end up with files that were added between
+ # qtip and the dirstate parent, but then changed in the
+ # local dirstate. in this case, we want them to only
+ # show up in the added section
+ for x in m:
+ if x not in aa:
+ mm.add(x)
+ # we might end up with files added by the local dirstate that
+ # were deleted by the patch. In this case, they should only
+ # show up in the changed section.
+ for x in a:
+ if x in dd:
+ dd.remove(x)
+ mm.add(x)
+ else:
+ aa.add(x)
+ # make sure any files deleted in the local dirstate
+ # are not in the add or change column of the patch
+ forget = []
+ for x in d + r:
+ if x in aa:
+ aa.remove(x)
+ forget.append(x)
+ continue
+ else:
+ mm.discard(x)
+ dd.add(x)
+
+ m = list(mm)
+ r = list(dd)
+ a = list(aa)
+ c = [filter(matchfn, l) for l in (m, a, r)]
+ match = scmutil.matchfiles(repo, set(c[0] + c[1] + c[2] + inclsubs))
+
+ try:
+ if diffopts.git or diffopts.upgrade:
+ copies = {}
+ for dst in a:
+ src = repo.dirstate.copied(dst)
+ # during qfold, the source file for copies may
+ # be removed. Treat this as a simple add.
+ if src is not None and src in repo.dirstate:
+ copies.setdefault(src, []).append(dst)
+ repo.dirstate.add(dst)
+ # remember the copies between patchparent and qtip
+ for dst in aaa:
+ f = repo.file(dst)
+ src = f.renamed(man[dst])
+ if src:
+ copies.setdefault(src[0], []).extend(
+ copies.get(dst, []))
+ if dst in a:
+ copies[src[0]].append(dst)
+ # we can't copy a file created by the patch itself
+ if dst in copies:
+ del copies[dst]
+ for src, dsts in copies.iteritems():
+ for dst in dsts:
+ repo.dirstate.copy(src, dst)
+ else:
+ for dst in a:
+ repo.dirstate.add(dst)
+ # Drop useless copy information
+ for f in list(repo.dirstate.copies()):
+ repo.dirstate.copy(None, f)
+ for f in r:
+ repo.dirstate.remove(f)
+ # if the patch excludes a modified file, mark that
+ # file with mtime=0 so status can see it.
+ mm = []
+ for i in xrange(len(m)-1, -1, -1):
+ if not matchfn(m[i]):
+ mm.append(m[i])
+ del m[i]
+ for f in m:
+ repo.dirstate.normal(f)
+ for f in mm:
+ repo.dirstate.normallookup(f)
+ for f in forget:
+ repo.dirstate.drop(f)
+
+ if not msg:
+ if not ph.message:
+ message = "[mq]: %s\n" % patchfn
+ else:
+ message = "\n".join(ph.message)
+ else:
+ message = msg
+
+ user = ph.user or changes[1]
+
+ oldphase = repo[top].phase()
+
+ # assumes strip can roll itself back if interrupted
+ repo.setparents(*cparents)
+ self.applied.pop()
+ self.applieddirty = True
+ self.strip(repo, [top], update=False,
+ backup='strip')
+ except: # re-raises
+ repo.dirstate.invalidate()
+ raise
+
+ try:
+ # might be nice to attempt to roll back strip after this
+
+ # Ensure we create a new changeset in the same phase as
+ # the old one.
+ n = newcommit(repo, oldphase, message, user, ph.date,
+ match=match, force=True)
+ # only write patch after a successful commit
+ if inclsubs:
+ self.putsubstate2changes(substatestate, c)
+ chunks = patchmod.diff(repo, patchparent,
+ changes=c, opts=diffopts)
+ for chunk in chunks:
+ patchf.write(chunk)
+ patchf.close()
+ self.applied.append(statusentry(n, patchfn))
+ except: # re-raises
+ ctx = repo[cparents[0]]
+ repo.dirstate.rebuild(ctx.node(), ctx.manifest())
+ self.savedirty()
+ self.ui.warn(_('refresh interrupted while patch was popped! '
+ '(revert --all, qpush to recover)\n'))
+ raise
+ finally:
+ wlock.release()
+ self.removeundo(repo)
+
+ def init(self, repo, create=False):
+ if not create and os.path.isdir(self.path):
+ raise util.Abort(_("patch queue directory already exists"))
+ try:
+ os.mkdir(self.path)
+ except OSError, inst:
+ if inst.errno != errno.EEXIST or not create:
+ raise
+ if create:
+ return self.qrepo(create=True)
+
+ def unapplied(self, repo, patch=None):
+ if patch and patch not in self.series:
+ raise util.Abort(_("patch %s is not in series file") % patch)
+ if not patch:
+ start = self.seriesend()
+ else:
+ start = self.series.index(patch) + 1
+ unapplied = []
+ for i in xrange(start, len(self.series)):
+ pushable, reason = self.pushable(i)
+ if pushable:
+ unapplied.append((i, self.series[i]))
+ self.explainpushable(i)
+ return unapplied
+
+ def qseries(self, repo, missing=None, start=0, length=None, status=None,
+ summary=False):
+ def displayname(pfx, patchname, state):
+ if pfx:
+ self.ui.write(pfx)
+ if summary:
+ ph = patchheader(self.join(patchname), self.plainmode)
+ msg = ph.message and ph.message[0] or ''
+ if self.ui.formatted():
+ width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
+ if width > 0:
+ msg = util.ellipsis(msg, width)
+ else:
+ msg = ''
+ self.ui.write(patchname, label='qseries.' + state)
+ self.ui.write(': ')
+ self.ui.write(msg, label='qseries.message.' + state)
+ else:
+ self.ui.write(patchname, label='qseries.' + state)
+ self.ui.write('\n')
+
+ applied = set([p.name for p in self.applied])
+ if length is None:
+ length = len(self.series) - start
+ if not missing:
+ if self.ui.verbose:
+ idxwidth = len(str(start + length - 1))
+ for i in xrange(start, start + length):
+ patch = self.series[i]
+ if patch in applied:
+ char, state = 'A', 'applied'
+ elif self.pushable(i)[0]:
+ char, state = 'U', 'unapplied'
+ else:
+ char, state = 'G', 'guarded'
+ pfx = ''
+ if self.ui.verbose:
+ pfx = '%*d %s ' % (idxwidth, i, char)
+ elif status and status != char:
+ continue
+ displayname(pfx, patch, state)
+ else:
+ msng_list = []
+ for root, dirs, files in os.walk(self.path):
+ d = root[len(self.path) + 1:]
+ for f in files:
+ fl = os.path.join(d, f)
+ if (fl not in self.series and
+ fl not in (self.statuspath, self.seriespath,
+ self.guardspath)
+ and not fl.startswith('.')):
+ msng_list.append(fl)
+ for x in sorted(msng_list):
+ pfx = self.ui.verbose and ('D ') or ''
+ displayname(pfx, x, 'missing')
+
+ def issaveline(self, l):
+ if l.name == '.hg.patches.save.line':
+ return True
+
+ def qrepo(self, create=False):
+ ui = self.ui.copy()
+ ui.setconfig('paths', 'default', '', overlay=False)
+ ui.setconfig('paths', 'default-push', '', overlay=False)
+ if create or os.path.isdir(self.join(".hg")):
+ return hg.repository(ui, path=self.path, create=create)
+
+ def restore(self, repo, rev, delete=None, qupdate=None):
+ desc = repo[rev].description().strip()
+ lines = desc.splitlines()
+ i = 0
+ datastart = None
+ series = []
+ applied = []
+ qpp = None
+ for i, line in enumerate(lines):
+ if line == 'Patch Data:':
+ datastart = i + 1
+ elif line.startswith('Dirstate:'):
+ l = line.rstrip()
+ l = l[10:].split(' ')
+ qpp = [bin(x) for x in l]
+ elif datastart is not None:
+ l = line.rstrip()
+ n, name = l.split(':', 1)
+ if n:
+ applied.append(statusentry(bin(n), name))
+ else:
+ series.append(l)
+ if datastart is None:
+ self.ui.warn(_("no saved patch data found\n"))
+ return 1
+ self.ui.warn(_("restoring status: %s\n") % lines[0])
+ self.fullseries = series
+ self.applied = applied
+ self.parseseries()
+ self.seriesdirty = True
+ self.applieddirty = True
+ heads = repo.changelog.heads()
+ if delete:
+ if rev not in heads:
+ self.ui.warn(_("save entry has children, leaving it alone\n"))
+ else:
+ self.ui.warn(_("removing save entry %s\n") % short(rev))
+ pp = repo.dirstate.parents()
+ if rev in pp:
+ update = True
+ else:
+ update = False
+ self.strip(repo, [rev], update=update, backup='strip')
+ if qpp:
+ self.ui.warn(_("saved queue repository parents: %s %s\n") %
+ (short(qpp[0]), short(qpp[1])))
+ if qupdate:
+ self.ui.status(_("updating queue directory\n"))
+ r = self.qrepo()
+ if not r:
+ self.ui.warn(_("unable to load queue repository\n"))
+ return 1
+ hg.clean(r, qpp[0])
+
+ def save(self, repo, msg=None):
+ if not self.applied:
+ self.ui.warn(_("save: no patches applied, exiting\n"))
+ return 1
+ if self.issaveline(self.applied[-1]):
+ self.ui.warn(_("status is already saved\n"))
+ return 1
+
+ if not msg:
+ msg = _("hg patches saved state")
+ else:
+ msg = "hg patches: " + msg.rstrip('\r\n')
+ r = self.qrepo()
+ if r:
+ pp = r.dirstate.parents()
+ msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
+ msg += "\n\nPatch Data:\n"
+ msg += ''.join('%s\n' % x for x in self.applied)
+ msg += ''.join(':%s\n' % x for x in self.fullseries)
+ n = repo.commit(msg, force=True)
+ if not n:
+ self.ui.warn(_("repo commit failed\n"))
+ return 1
+ self.applied.append(statusentry(n, '.hg.patches.save.line'))
+ self.applieddirty = True
+ self.removeundo(repo)
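+
+ # A save changeset description has roughly this form (sketch; the
+ # Dirstate line is present only when a versioned patch repo exists):
+ #
+ #   hg patches saved state
+ #   Dirstate: <p1 hex> <p2 hex>
+ #
+ #   Patch Data:
+ #   <node hex>:<patch name>   (one line per applied patch)
+ #   :<series line>            (one line per fullseries entry)
+ #
+ # restore() above parses this layout back out of the description.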
+
+ def fullseriesend(self):
+ if self.applied:
+ p = self.applied[-1].name
+ end = self.findseries(p)
+ if end is None:
+ return len(self.fullseries)
+ return end + 1
+ return 0
+
+ def seriesend(self, all_patches=False):
+ """If all_patches is False, return the index of the next pushable patch
+ in the series, or the series length. If all_patches is True, return the
+ index of the first patch past the last applied one.
+ """
+ end = 0
+ def next(start):
+ if all_patches or start >= len(self.series):
+ return start
+ for i in xrange(start, len(self.series)):
+ p, reason = self.pushable(i)
+ if p:
+ return i
+ self.explainpushable(i)
+ return len(self.series)
+ if self.applied:
+ p = self.applied[-1].name
+ try:
+ end = self.series.index(p)
+ except ValueError:
+ return 0
+ return next(end + 1)
+ return next(end)
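+
+ # Example (sketch): with series = ['p0', 'p1', 'p2'] and only 'p0'
+ # applied, seriesend(True) returns 1 (just past the last applied
+ # patch), while seriesend() returns the index of the first pushable
+ # patch at or after 1, or len(series) if none is pushable.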
+
+ def appliedname(self, index):
+ pname = self.applied[index].name
+ if not self.ui.verbose:
+ p = pname
+ else:
+ p = str(self.series.index(pname)) + " " + pname
+ return p
+
+ def qimport(self, repo, files, patchname=None, rev=None, existing=None,
+ force=None, git=False):
+ def checkseries(patchname):
+ if patchname in self.series:
+ raise util.Abort(_('patch %s is already in the series file')
+ % patchname)
+
+ if rev:
+ if files:
+ raise util.Abort(_('option "-r" not valid when importing '
+ 'files'))
+ rev = scmutil.revrange(repo, rev)
+ rev.sort(reverse=True)
+ elif not files:
+ raise util.Abort(_('no files or revisions specified'))
+ if (len(files) > 1 or len(rev) > 1) and patchname:
+ raise util.Abort(_('option "-n" not valid when importing multiple '
+ 'patches'))
+ imported = []
+ if rev:
+ # If mq patches are applied, we can only import revisions
+ # that form a linear path to qbase.
+ # Otherwise, they should form a linear path to a head.
+ heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
+ if len(heads) > 1:
+ raise util.Abort(_('revision %d is the root of more than one '
+ 'branch') % rev[-1])
+ if self.applied:
+ base = repo.changelog.node(rev[0])
+ if base in [n.node for n in self.applied]:
+ raise util.Abort(_('revision %d is already managed')
+ % rev[0])
+ if heads != [self.applied[-1].node]:
+ raise util.Abort(_('revision %d is not the parent of '
+ 'the queue') % rev[0])
+ base = repo.changelog.rev(self.applied[0].node)
+ lastparent = repo.changelog.parentrevs(base)[0]
+ else:
+ if heads != [repo.changelog.node(rev[0])]:
+ raise util.Abort(_('revision %d has unmanaged children')
+ % rev[0])
+ lastparent = None
+
+ diffopts = self.diffopts({'git': git})
+ for r in rev:
+ if not repo[r].mutable():
+ raise util.Abort(_('revision %d is not mutable') % r,
+ hint=_('see "hg help phases" for details'))
+ p1, p2 = repo.changelog.parentrevs(r)
+ n = repo.changelog.node(r)
+ if p2 != nullrev:
+ raise util.Abort(_('cannot import merge revision %d') % r)
+ if lastparent and lastparent != r:
+ raise util.Abort(_('revision %d is not the parent of %d')
+ % (r, lastparent))
+ lastparent = p1
+
+ if not patchname:
+ patchname = normname('%d.diff' % r)
+ checkseries(patchname)
+ self.checkpatchname(patchname, force)
+ self.fullseries.insert(0, patchname)
+
+ patchf = self.opener(patchname, "w")
+ cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
+ patchf.close()
+
+ se = statusentry(n, patchname)
+ self.applied.insert(0, se)
+
+ self.added.append(patchname)
+ imported.append(patchname)
+ patchname = None
+ if rev and repo.ui.configbool('mq', 'secret', False):
+ # if we added anything with --rev, we must move the secret root
+ phases.retractboundary(repo, phases.secret, [n])
+ self.parseseries()
+ self.applieddirty = True
+ self.seriesdirty = True
+
+ for i, filename in enumerate(files):
+ if existing:
+ if filename == '-':
+ raise util.Abort(_('-e is incompatible with import from -'))
+ filename = normname(filename)
+ self.checkreservedname(filename)
+ originpath = self.join(filename)
+ if not os.path.isfile(originpath):
+ raise util.Abort(_("patch %s does not exist") % filename)
+
+ if patchname:
+ self.checkpatchname(patchname, force)
+
+ self.ui.write(_('renaming %s to %s\n')
+ % (filename, patchname))
+ util.rename(originpath, self.join(patchname))
+ else:
+ patchname = filename
+
+ else:
+ if filename == '-' and not patchname:
+ raise util.Abort(_('need --name to import a patch from -'))
+ elif not patchname:
+ patchname = normname(os.path.basename(filename.rstrip('/')))
+ self.checkpatchname(patchname, force)
+ try:
+ if filename == '-':
+ text = self.ui.fin.read()
+ else:
+ fp = url.open(self.ui, filename)
+ text = fp.read()
+ fp.close()
+ except (OSError, IOError):
+ raise util.Abort(_("unable to read file %s") % filename)
+ patchf = self.opener(patchname, "w")
+ patchf.write(text)
+ patchf.close()
+ if not force:
+ checkseries(patchname)
+ if patchname not in self.series:
+ index = self.fullseriesend() + i
+ self.fullseries[index:index] = [patchname]
+ self.parseseries()
+ self.seriesdirty = True
+ self.ui.warn(_("adding %s to series file\n") % patchname)
+ self.added.append(patchname)
+ imported.append(patchname)
+ patchname = None
+
+ self.removeundo(repo)
+ return imported
+
+def fixkeepchangesopts(ui, opts):
+ if (not ui.configbool('mq', 'keepchanges') or opts.get('force')
+ or opts.get('exact')):
+ return opts
+ opts = dict(opts)
+ opts['keep_changes'] = True
+ return opts
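+
+# For example (sketch), with this in hgrc:
+#
+#   [mq]
+#   keepchanges = True
+#
+# qpush/qpop/qgoto behave as if --keep-changes were given, unless
+# --force or --exact is passed explicitly.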
+
+@command("qdelete|qremove|qrm",
+ [('k', 'keep', None, _('keep patch file')),
+ ('r', 'rev', [],
+ _('stop managing a revision (DEPRECATED)'), _('REV'))],
+ _('hg qdelete [-k] [PATCH]...'))
+def delete(ui, repo, *patches, **opts):
+ """remove patches from queue
+
+ The patches must not be applied, and at least one patch is required. Exact
+ patch identifiers must be given. With -k/--keep, the patch files are
+ preserved in the patch directory.
+
+ To stop managing a patch and move it into permanent history,
+ use the :hg:`qfinish` command."""
+ q = repo.mq
+ q.delete(repo, patches, opts)
+ q.savedirty()
+ return 0
+
+@command("qapplied",
+ [('1', 'last', None, _('show only the preceding applied patch'))
+ ] + seriesopts,
+ _('hg qapplied [-1] [-s] [PATCH]'))
+def applied(ui, repo, patch=None, **opts):
+ """print the patches already applied
+
+ Returns 0 on success."""
+
+ q = repo.mq
+
+ if patch:
+ if patch not in q.series:
+ raise util.Abort(_("patch %s is not in series file") % patch)
+ end = q.series.index(patch) + 1
+ else:
+ end = q.seriesend(True)
+
+ if opts.get('last') and not end:
+ ui.write(_("no patches applied\n"))
+ return 1
+ elif opts.get('last') and end == 1:
+ ui.write(_("only one patch applied\n"))
+ return 1
+ elif opts.get('last'):
+ start = end - 2
+ end = 1
+ else:
+ start = 0
+
+ q.qseries(repo, length=end, start=start, status='A',
+ summary=opts.get('summary'))
+
+
+@command("qunapplied",
+ [('1', 'first', None, _('show only the first patch'))] + seriesopts,
+ _('hg qunapplied [-1] [-s] [PATCH]'))
+def unapplied(ui, repo, patch=None, **opts):
+ """print the patches not yet applied
+
+ Returns 0 on success."""
+
+ q = repo.mq
+ if patch:
+ if patch not in q.series:
+ raise util.Abort(_("patch %s is not in series file") % patch)
+ start = q.series.index(patch) + 1
+ else:
+ start = q.seriesend(True)
+
+ if start == len(q.series) and opts.get('first'):
+ ui.write(_("all patches applied\n"))
+ return 1
+
+ length = opts.get('first') and 1 or None
+ q.qseries(repo, start=start, length=length, status='U',
+ summary=opts.get('summary'))
+
+@command("qimport",
+ [('e', 'existing', None, _('import file in patch directory')),
+ ('n', 'name', '',
+ _('name of patch file'), _('NAME')),
+ ('f', 'force', None, _('overwrite existing files')),
+ ('r', 'rev', [],
+ _('place existing revisions under mq control'), _('REV')),
+ ('g', 'git', None, _('use git extended diff format')),
+ ('P', 'push', None, _('qpush after importing'))],
+ _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'))
+def qimport(ui, repo, *filename, **opts):
+ """import a patch or existing changeset
+
+ The patch is inserted into the series after the last applied
+ patch. If no patches have been applied, qimport prepends the patch
+ to the series.
+
+ The patch will have the same name as its source file unless you
+ give it a new one with -n/--name.
+
+ You can register an existing patch inside the patch directory with
+ the -e/--existing flag.
+
+ With -f/--force, an existing patch of the same name will be
+ overwritten.
+
+ An existing changeset may be placed under mq control with -r/--rev
+ (e.g. qimport --rev tip -n patch will place tip under mq control).
+ With -g/--git, patches imported with --rev will use the git diff
+ format. See the diffs help topic for information on why this is
+ important for preserving rename/copy information and permission
+ changes. Use :hg:`qfinish` to remove changesets from mq control.
+
+ To import a patch from standard input, pass - as the patch file.
+ When importing from standard input, a patch name must be specified
+ using the --name flag.
+
+ To import an existing patch while renaming it::
+
+ hg qimport -e existing-patch -n new-name
+
+ Returns 0 if import succeeded.
+ """
+ lock = repo.lock() # because this may move phases
+ try:
+ q = repo.mq
+ try:
+ imported = q.qimport(
+ repo, filename, patchname=opts.get('name'),
+ existing=opts.get('existing'), force=opts.get('force'),
+ rev=opts.get('rev'), git=opts.get('git'))
+ finally:
+ q.savedirty()
+ finally:
+ lock.release()
+
+ if imported and opts.get('push') and not opts.get('rev'):
+ return q.push(repo, imported[-1])
+ return 0
+
+def qinit(ui, repo, create):
+ """initialize a new queue repository
+
+ This command also creates a series file for ordering patches, and
+ an mq-specific .hgignore file in the queue repository, to exclude
+ the status and guards files (these contain mostly transient state).
+
+ Returns 0 if initialization succeeded."""
+ q = repo.mq
+ r = q.init(repo, create)
+ q.savedirty()
+ if r:
+ if not os.path.exists(r.wjoin('.hgignore')):
+ fp = r.wopener('.hgignore', 'w')
+ fp.write('^\\.hg\n')
+ fp.write('^\\.mq\n')
+ fp.write('syntax: glob\n')
+ fp.write('status\n')
+ fp.write('guards\n')
+ fp.close()
+ if not os.path.exists(r.wjoin('series')):
+ r.wopener('series', 'w').close()
+ r[None].add(['.hgignore', 'series'])
+ commands.add(ui, r)
+ return 0
+
+@command("^qinit",
+ [('c', 'create-repo', None, _('create queue repository'))],
+ _('hg qinit [-c]'))
+def init(ui, repo, **opts):
+ """init a new queue repository (DEPRECATED)
+
+ The queue repository is unversioned by default. If
+ -c/--create-repo is specified, qinit will create a separate nested
+ repository for patches (qinit -c may also be run later to convert
+ an unversioned patch repository into a versioned one). You can use
+ qcommit to commit changes to this queue repository.
+
+ This command is deprecated. Without -c, it's implied by other relevant
+ commands. With -c, use :hg:`init --mq` instead."""
+ return qinit(ui, repo, create=opts.get('create_repo'))
+
+@command("qclone",
+ [('', 'pull', None, _('use pull protocol to copy metadata')),
+ ('U', 'noupdate', None,
+ _('do not update the new working directories')),
+ ('', 'uncompressed', None,
+ _('use uncompressed transfer (fast over LAN)')),
+ ('p', 'patches', '',
+ _('location of source patch repository'), _('REPO')),
+ ] + commands.remoteopts,
+ _('hg qclone [OPTION]... SOURCE [DEST]'))
+def clone(ui, source, dest=None, **opts):
+ '''clone main and patch repository at same time
+
+ If source is local, destination will have no patches applied. If
+ source is remote, this command cannot check whether patches are
+ applied in source, so it cannot guarantee that patches are not
+ applied in destination. If you clone a remote repository, make
+ sure beforehand that it has no patches applied.
+
+ The source patch repository is looked for in <src>/.hg/patches by
+ default. Use -p <url> to change it.
+
+ The patch directory must be a nested Mercurial repository, as
+ would be created by :hg:`init --mq`.
+
+ Return 0 on success.
+ '''
+ def patchdir(repo):
+ """compute a patch repo url from a repo object"""
+ url = repo.url()
+ if url.endswith('/'):
+ url = url[:-1]
+ return url + '/.hg/patches'
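+ # e.g. a repo at 'http://host/repo' yields
+ # 'http://host/repo/.hg/patches'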
+
+ # main repo (destination and sources)
+ if dest is None:
+ dest = hg.defaultdest(source)
+ sr = hg.peer(ui, opts, ui.expandpath(source))
+
+ # patches repo (source only)
+ if opts.get('patches'):
+ patchespath = ui.expandpath(opts.get('patches'))
+ else:
+ patchespath = patchdir(sr)
+ try:
+ hg.peer(ui, opts, patchespath)
+ except error.RepoError:
+ raise util.Abort(_('versioned patch repository not found'
+ ' (see init --mq)'))
+ qbase, destrev = None, None
+ if sr.local():
+ repo = sr.local()
+ if repo.mq.applied and repo[qbase].phase() != phases.secret:
+ qbase = repo.mq.applied[0].node
+ if not hg.islocal(dest):
+ heads = set(repo.heads())
+ destrev = list(heads.difference(repo.heads(qbase)))
+ destrev.append(repo.changelog.parents(qbase)[0])
+ elif sr.capable('lookup'):
+ try:
+ qbase = sr.lookup('qbase')
+ except error.RepoError:
+ pass
+
+ ui.note(_('cloning main repository\n'))
+ sr, dr = hg.clone(ui, opts, sr.url(), dest,
+ pull=opts.get('pull'),
+ rev=destrev,
+ update=False,
+ stream=opts.get('uncompressed'))
+
+ ui.note(_('cloning patch repository\n'))
+ hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr),
+ pull=opts.get('pull'), update=not opts.get('noupdate'),
+ stream=opts.get('uncompressed'))
+
+ if dr.local():
+ repo = dr.local()
+ if qbase:
+ ui.note(_('stripping applied patches from destination '
+ 'repository\n'))
+ repo.mq.strip(repo, [qbase], update=False, backup=None)
+ if not opts.get('noupdate'):
+ ui.note(_('updating destination repository\n'))
+ hg.update(repo, repo.changelog.tip())
+
+@command("qcommit|qci",
+ commands.table["^commit|ci"][1],
+ _('hg qcommit [OPTION]... [FILE]...'))
+def commit(ui, repo, *pats, **opts):
+ """commit changes in the queue repository (DEPRECATED)
+
+ This command is deprecated; use :hg:`commit --mq` instead."""
+ q = repo.mq
+ r = q.qrepo()
+ if not r:
+ raise util.Abort('no queue repository')
+ commands.commit(r.ui, r, *pats, **opts)
+
+@command("qseries",
+ [('m', 'missing', None, _('print patches not in series')),
+ ] + seriesopts,
+ _('hg qseries [-ms]'))
+def series(ui, repo, **opts):
+ """print the entire series file
+
+ Returns 0 on success."""
+ repo.mq.qseries(repo, missing=opts.get('missing'),
+ summary=opts.get('summary'))
+ return 0
+
+@command("qtop", seriesopts, _('hg qtop [-s]'))
+def top(ui, repo, **opts):
+ """print the name of the current patch
+
+ Returns 0 on success."""
+ q = repo.mq
+ t = q.applied and q.seriesend(True) or 0
+ if t:
+ q.qseries(repo, start=t - 1, length=1, status='A',
+ summary=opts.get('summary'))
+ else:
+ ui.write(_("no patches applied\n"))
+ return 1
+
+@command("qnext", seriesopts, _('hg qnext [-s]'))
+def next(ui, repo, **opts):
+ """print the name of the next pushable patch
+
+ Returns 0 on success."""
+ q = repo.mq
+ end = q.seriesend()
+ if end == len(q.series):
+ ui.write(_("all patches applied\n"))
+ return 1
+ q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
+
+@command("qprev", seriesopts, _('hg qprev [-s]'))
+def prev(ui, repo, **opts):
+ """print the name of the preceding applied patch
+
+ Returns 0 on success."""
+ q = repo.mq
+ l = len(q.applied)
+ if l == 1:
+ ui.write(_("only one patch applied\n"))
+ return 1
+ if not l:
+ ui.write(_("no patches applied\n"))
+ return 1
+ idx = q.series.index(q.applied[-2].name)
+ q.qseries(repo, start=idx, length=1, status='A',
+ summary=opts.get('summary'))
+
+def setupheaderopts(ui, opts):
+ if not opts.get('user') and opts.get('currentuser'):
+ opts['user'] = ui.username()
+ if not opts.get('date') and opts.get('currentdate'):
+ opts['date'] = "%d %d" % util.makedate()
+
+@command("^qnew",
+ [('e', 'edit', None, _('edit commit message')),
+ ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
+ ('g', 'git', None, _('use git extended diff format')),
+ ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
+ ('u', 'user', '',
+ _('add "From: <USER>" to patch'), _('USER')),
+ ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
+ ('d', 'date', '',
+ _('add "Date: <DATE>" to patch'), _('DATE'))
+ ] + commands.walkopts + commands.commitopts,
+ _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'))
+def new(ui, repo, patch, *args, **opts):
+ """create a new patch
+
+ qnew creates a new patch on top of the currently-applied patch (if
+ any). The patch will be initialized with any outstanding changes
+ in the working directory. You may also use -I/--include,
+ -X/--exclude, and/or a list of files after the patch name to add
+ only changes to matching files to the new patch, leaving the rest
+ as uncommitted modifications.
+
+ -u/--user and -d/--date can be used to set the (given) user and
+ date, respectively. -U/--currentuser and -D/--currentdate set user
+ to current user and date to current date.
+
+ -e/--edit, -m/--message or -l/--logfile set the patch header as
+ well as the commit message. If none is specified, the header is
+ empty and the commit message is '[mq]: PATCH'.
+
+ Use the -g/--git option to keep the patch in the git extended diff
+ format. Read the diffs help topic for more information on why this
+ is important for preserving permission changes and copy/rename
+ information.
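+
+ For example (patch and file names illustrative)::
+
+ hg qnew -g -m "fix frob" fix-frob.patch src/a.c src/b.c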
+
+ Returns 0 on successful creation of a new patch.
+ """
+ msg = cmdutil.logmessage(ui, opts)
+ def getmsg():
+ return ui.edit(msg, opts.get('user') or ui.username())
+ q = repo.mq
+ opts['msg'] = msg
+ if opts.get('edit'):
+ opts['msg'] = getmsg
+ else:
+ opts['msg'] = msg
+ setupheaderopts(ui, opts)
+ q.new(repo, patch, *args, **opts)
+ q.savedirty()
+ return 0
+
+@command("^qrefresh",
+ [('e', 'edit', None, _('edit commit message')),
+ ('g', 'git', None, _('use git extended diff format')),
+ ('s', 'short', None,
+ _('refresh only files already in the patch and specified files')),
+ ('U', 'currentuser', None,
+ _('add/update author field in patch with current user')),
+ ('u', 'user', '',
+ _('add/update author field in patch with given user'), _('USER')),
+ ('D', 'currentdate', None,
+ _('add/update date field in patch with current date')),
+ ('d', 'date', '',
+ _('add/update date field in patch with given date'), _('DATE'))
+ ] + commands.walkopts + commands.commitopts,
+ _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'))
+def refresh(ui, repo, *pats, **opts):
+ """update the current patch
+
+ If any file patterns are provided, the refreshed patch will
+ contain only the modifications that match those patterns; the
+ remaining modifications will remain in the working directory.
+
+ If -s/--short is specified, files currently included in the patch
+ will be refreshed just like matched files and remain in the patch.
+
+ If -e/--edit is specified, Mercurial will start your configured editor for
+ you to enter a message. In case qrefresh fails, you will find a backup of
+ your message in ``.hg/last-message.txt``.
+
+ hg add/remove/copy/rename work as usual, though you might want to
+ use git-style patches (-g/--git or [diff] git=1) to track copies
+ and renames. See the diffs help topic for more information on the
+ git diff format.
+
+ Returns 0 on success.
+ """
+ q = repo.mq
+ message = cmdutil.logmessage(ui, opts)
+ if opts.get('edit'):
+ if not q.applied:
+ ui.write(_("no patches applied\n"))
+ return 1
+ if message:
+ raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
+ patch = q.applied[-1].name
+ ph = patchheader(q.join(patch), q.plainmode)
+ message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
+ # We don't want to lose the patch message if qrefresh fails (issue2062)
+ repo.savecommitmessage(message)
+ setupheaderopts(ui, opts)
+ wlock = repo.wlock()
+ try:
+ ret = q.refresh(repo, pats, msg=message, **opts)
+ q.savedirty()
+ return ret
+ finally:
+ wlock.release()
+
+@command("^qdiff",
+ commands.diffopts + commands.diffopts2 + commands.walkopts,
+ _('hg qdiff [OPTION]... [FILE]...'))
+def diff(ui, repo, *pats, **opts):
+ """diff of the current patch and subsequent modifications
+
+ Shows a diff which includes the current patch as well as any
+ changes which have been made in the working directory since the
+ last refresh (thus showing what the current patch would become
+ after a qrefresh).
+
+ Use :hg:`diff` if you only want to see the changes made since the
+ last qrefresh, or :hg:`export qtip` if you want to see changes
+ made by the current patch without including changes made since the
+ qrefresh.
+
+ Returns 0 on success.
+ """
+ repo.mq.diff(repo, pats, opts)
+ return 0
+
+@command('qfold',
+ [('e', 'edit', None, _('edit patch header')),
+ ('k', 'keep', None, _('keep folded patch files')),
+ ] + commands.commitopts,
+ _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'))
+def fold(ui, repo, *files, **opts):
+ """fold the named patches into the current patch
+
+ Patches must not yet be applied. Each patch will be successively
+ applied to the current patch in the order given. If all the
+ patches apply successfully, the current patch will be refreshed
+ with the new cumulative patch, and the folded patches will be
+ deleted. With -k/--keep, the folded patch files will not be
+ removed afterwards.
+
+ The header for each folded patch will be concatenated with the
+ current patch header, separated by a line of ``* * *``.
+
+ Returns 0 on success."""
+ q = repo.mq
+ if not files:
+ raise util.Abort(_('qfold requires at least one patch name'))
+ if not q.checktoppatch(repo)[0]:
+ raise util.Abort(_('no patches applied'))
+ q.checklocalchanges(repo)
+
+ message = cmdutil.logmessage(ui, opts)
+ if opts.get('edit'):
+ if message:
+ raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
+
+ parent = q.lookup('qtip')
+ patches = []
+ messages = []
+ for f in files:
+ p = q.lookup(f)
+ if p in patches or p == parent:
+ ui.warn(_('skipping already folded patch %s\n') % p)
+ continue
+ if q.isapplied(p):
+ raise util.Abort(_('qfold cannot fold already applied patch %s')
+ % p)
+ patches.append(p)
+
+ for p in patches:
+ if not message:
+ ph = patchheader(q.join(p), q.plainmode)
+ if ph.message:
+ messages.append(ph.message)
+ pf = q.join(p)
+ (patchsuccess, files, fuzz) = q.patch(repo, pf)
+ if not patchsuccess:
+ raise util.Abort(_('error folding patch %s') % p)
+
+ if not message:
+ ph = patchheader(q.join(parent), q.plainmode)
+ message, user = ph.message, ph.user
+ for msg in messages:
+ message.append('* * *')
+ message.extend(msg)
+ message = '\n'.join(message)
+
+ if opts.get('edit'):
+ message = ui.edit(message, user or ui.username())
+
+ diffopts = q.patchopts(q.diffopts(), *patches)
+ wlock = repo.wlock()
+ try:
+ q.refresh(repo, msg=message, git=diffopts.git)
+ q.delete(repo, patches, opts)
+ q.savedirty()
+ finally:
+ wlock.release()
+
+@command("qgoto",
+ [('', 'keep-changes', None,
+ _('tolerate non-conflicting local changes')),
+ ('f', 'force', None, _('overwrite any local changes')),
+ ('', 'no-backup', None, _('do not save backup copies of files'))],
+ _('hg qgoto [OPTION]... PATCH'))
+def goto(ui, repo, patch, **opts):
+ '''push or pop patches until named patch is at top of stack
+
+ Returns 0 on success.'''
+ opts = fixkeepchangesopts(ui, opts)
+ q = repo.mq
+ patch = q.lookup(patch)
+ nobackup = opts.get('no_backup')
+ keepchanges = opts.get('keep_changes')
+ if q.isapplied(patch):
+ ret = q.pop(repo, patch, force=opts.get('force'), nobackup=nobackup,
+ keepchanges=keepchanges)
+ else:
+ ret = q.push(repo, patch, force=opts.get('force'), nobackup=nobackup,
+ keepchanges=keepchanges)
+ q.savedirty()
+ return ret
+
+@command("qguard",
+ [('l', 'list', None, _('list all patches and guards')),
+ ('n', 'none', None, _('drop all guards'))],
+ _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'))
+def guard(ui, repo, *args, **opts):
+ '''set or print guards for a patch
+
+ Guards control whether a patch can be pushed. A patch with no
+ guards is always pushed. A patch with a positive guard ("+foo") is
+ pushed only if the :hg:`qselect` command has activated it. A patch with
+ a negative guard ("-foo") is never pushed if the :hg:`qselect` command
+ has activated it.
+
+ With no arguments, print the currently active guards.
+ With arguments, set guards for the named patch.
+
+ .. note::
+ Specifying negative guards now requires '--'.
+
+ To set guards on another patch::
+
+ hg qguard other.patch -- +2.6.17 -stable
+
+ Returns 0 on success.
+ '''
+ def status(idx):
+ guards = q.seriesguards[idx] or ['unguarded']
+ if q.series[idx] in applied:
+ state = 'applied'
+ elif q.pushable(idx)[0]:
+ state = 'unapplied'
+ else:
+ state = 'guarded'
+ label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
+ ui.write('%s: ' % ui.label(q.series[idx], label))
+
+ for i, guard in enumerate(guards):
+ if guard.startswith('+'):
+ ui.write(guard, label='qguard.positive')
+ elif guard.startswith('-'):
+ ui.write(guard, label='qguard.negative')
+ else:
+ ui.write(guard, label='qguard.unguarded')
+ if i != len(guards) - 1:
+ ui.write(' ')
+ ui.write('\n')
+ q = repo.mq
+ applied = set(p.name for p in q.applied)
+ patch = None
+ args = list(args)
+ if opts.get('list'):
+ if args or opts.get('none'):
+ raise util.Abort(_('cannot mix -l/--list with options or '
+ 'arguments'))
+ for i in xrange(len(q.series)):
+ status(i)
+ return
+ if not args or args[0][0:1] in '-+':
+ if not q.applied:
+ raise util.Abort(_('no patches applied'))
+ patch = q.applied[-1].name
+ if patch is None and args[0][0:1] not in '-+':
+ patch = args.pop(0)
+ if patch is None:
+ raise util.Abort(_('no patch to work with'))
+ if args or opts.get('none'):
+ idx = q.findseries(patch)
+ if idx is None:
+ raise util.Abort(_('no patch named %s') % patch)
+ q.setguards(idx, args)
+ q.savedirty()
+ else:
+ status(q.series.index(q.lookup(patch)))
+
+@command("qheader", [], _('hg qheader [PATCH]'))
+def header(ui, repo, patch=None):
+ """print the header of the topmost or specified patch
+
+ Returns 0 on success."""
+ q = repo.mq
+
+ if patch:
+ patch = q.lookup(patch)
+ else:
+ if not q.applied:
+ ui.write(_('no patches applied\n'))
+ return 1
+ patch = q.lookup('qtip')
+ ph = patchheader(q.join(patch), q.plainmode)
+
+ ui.write('\n'.join(ph.message) + '\n')
+
+def lastsavename(path):
+ (directory, base) = os.path.split(path)
+ names = os.listdir(directory)
+ namere = re.compile("%s.([0-9]+)" % base)
+ maxindex = None
+ maxname = None
+ for f in names:
+ m = namere.match(f)
+ if m:
+ index = int(m.group(1))
+ if maxindex is None or index > maxindex:
+ maxindex = index
+ maxname = f
+ if maxname:
+ return (os.path.join(directory, maxname), maxindex)
+ return (None, None)
+
+def savename(path):
+ (last, index) = lastsavename(path)
+ if last is None:
+ index = 0
+ newpath = path + ".%d" % (index + 1)
+ return newpath
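+
+# Example (sketch): given .hg/patches.1 and .hg/patches.3 on disk,
+# lastsavename('.hg/patches') returns ('.hg/patches.3', 3), and
+# savename('.hg/patches') returns '.hg/patches.4'.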
+
+@command("^qpush",
+ [('', 'keep-changes', None,
+ _('tolerate non-conflicting local changes')),
+ ('f', 'force', None, _('apply on top of local changes')),
+ ('e', 'exact', None,
+ _('apply the target patch to its recorded parent')),
+ ('l', 'list', None, _('list patch name in commit text')),
+ ('a', 'all', None, _('apply all patches')),
+ ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
+ ('n', 'name', '',
+ _('merge queue name (DEPRECATED)'), _('NAME')),
+ ('', 'move', None,
+ _('reorder patch series and apply only the patch')),
+ ('', 'no-backup', None, _('do not save backup copies of files'))],
+ _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'))
+def push(ui, repo, patch=None, **opts):
+ """push the next patch onto the stack
+
+ By default, abort if the working directory contains uncommitted
+ changes. With --keep-changes, abort only if the uncommitted files
+ overlap with patched files. With -f/--force, backup and patch over
+ uncommitted changes.
+
+ Return 0 on success.
+ """
+ q = repo.mq
+ mergeq = None
+
+ opts = fixkeepchangesopts(ui, opts)
+ if opts.get('merge'):
+ if opts.get('name'):
+ newpath = repo.join(opts.get('name'))
+ else:
+ newpath, i = lastsavename(q.path)
+ if not newpath:
+ ui.warn(_("no saved queues found, please use -n\n"))
+ return 1
+ mergeq = queue(ui, repo.path, newpath)
+ ui.warn(_("merging with queue at: %s\n") % mergeq.path)
+ ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
+ mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
+ exact=opts.get('exact'), nobackup=opts.get('no_backup'),
+ keepchanges=opts.get('keep_changes'))
+ return ret
+
+@command("^qpop",
+ [('a', 'all', None, _('pop all patches')),
+ ('n', 'name', '',
+ _('queue name to pop (DEPRECATED)'), _('NAME')),
+ ('', 'keep-changes', None,
+ _('tolerate non-conflicting local changes')),
+ ('f', 'force', None, _('forget any local changes to patched files')),
+ ('', 'no-backup', None, _('do not save backup copies of files'))],
+ _('hg qpop [-a] [-f] [PATCH | INDEX]'))
+def pop(ui, repo, patch=None, **opts):
+ """pop the current patch off the stack
+
+ Without argument, pops off the top of the patch stack. If given a
+ patch name, keeps popping off patches until the named patch is at
+ the top of the stack.
+
+ By default, abort if the working directory contains uncommitted
+ changes. With --keep-changes, abort only if the uncommitted files
+ overlap with patched files. With -f/--force, backup and discard
+ changes made to such files.
+
+ Return 0 on success.
+ """
+ opts = fixkeepchangesopts(ui, opts)
+ localupdate = True
+ if opts.get('name'):
+ q = queue(ui, repo.path, repo.join(opts.get('name')))
+ ui.warn(_('using patch queue: %s\n') % q.path)
+ localupdate = False
+ else:
+ q = repo.mq
+ ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
+ all=opts.get('all'), nobackup=opts.get('no_backup'),
+ keepchanges=opts.get('keep_changes'))
+ q.savedirty()
+ return ret
+
+@command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'))
+def rename(ui, repo, patch, name=None, **opts):
+ """rename a patch
+
+ With one argument, renames the current patch to PATCH1.
+ With two arguments, renames PATCH1 to PATCH2.
+
+ Returns 0 on success."""
+ q = repo.mq
+ if not name:
+ name = patch
+ patch = None
+
+ if patch:
+ patch = q.lookup(patch)
+ else:
+ if not q.applied:
+ ui.write(_('no patches applied\n'))
+ return
+ patch = q.lookup('qtip')
+ absdest = q.join(name)
+ if os.path.isdir(absdest):
+ name = normname(os.path.join(name, os.path.basename(patch)))
+ absdest = q.join(name)
+ q.checkpatchname(name)
+
+ ui.note(_('renaming %s to %s\n') % (patch, name))
+ i = q.findseries(patch)
+ guards = q.guard_re.findall(q.fullseries[i])
+ q.fullseries[i] = name + ''.join([' #' + g for g in guards])
+ q.parseseries()
+ q.seriesdirty = True
+
+ info = q.isapplied(patch)
+ if info:
+ q.applied[info[0]] = statusentry(info[1], name)
+ q.applieddirty = True
+
+ destdir = os.path.dirname(absdest)
+ if not os.path.isdir(destdir):
+ os.makedirs(destdir)
+ util.rename(q.join(patch), absdest)
+ r = q.qrepo()
+ if r and patch in r.dirstate:
+ wctx = r[None]
+ wlock = r.wlock()
+ try:
+ if r.dirstate[patch] == 'a':
+ r.dirstate.drop(patch)
+ r.dirstate.add(name)
+ else:
+ wctx.copy(patch, name)
+ wctx.forget([patch])
+ finally:
+ wlock.release()
+
+ q.savedirty()
+
+@command("qrestore",
+ [('d', 'delete', None, _('delete save entry')),
+ ('u', 'update', None, _('update queue working directory'))],
+ _('hg qrestore [-d] [-u] REV'))
+def restore(ui, repo, rev, **opts):
+ """restore the queue state saved by a revision (DEPRECATED)
+
+ This command is deprecated, use :hg:`rebase` instead."""
+ rev = repo.lookup(rev)
+ q = repo.mq
+ q.restore(repo, rev, delete=opts.get('delete'),
+ qupdate=opts.get('update'))
+ q.savedirty()
+ return 0
+
+@command("qsave",
+ [('c', 'copy', None, _('copy patch directory')),
+ ('n', 'name', '',
+ _('copy directory name'), _('NAME')),
+ ('e', 'empty', None, _('clear queue status file')),
+ ('f', 'force', None, _('force copy'))] + commands.commitopts,
+ _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'))
+def save(ui, repo, **opts):
+ """save current queue state (DEPRECATED)
+
+ This command is deprecated, use :hg:`rebase` instead."""
+ q = repo.mq
+ message = cmdutil.logmessage(ui, opts)
+ ret = q.save(repo, msg=message)
+ if ret:
+ return ret
+ q.savedirty() # save to .hg/patches before copying
+ if opts.get('copy'):
+ path = q.path
+ if opts.get('name'):
+ newpath = os.path.join(q.basepath, opts.get('name'))
+ if os.path.exists(newpath):
+ if not os.path.isdir(newpath):
+ raise util.Abort(_('destination %s exists and is not '
+ 'a directory') % newpath)
+ if not opts.get('force'):
+ raise util.Abort(_('destination %s exists, '
+ 'use -f to force') % newpath)
+ else:
+ newpath = savename(path)
+ ui.warn(_("copy %s to %s\n") % (path, newpath))
+ util.copyfiles(path, newpath)
+ if opts.get('empty'):
+ del q.applied[:]
+ q.applieddirty = True
+ q.savedirty()
+ return 0
+
+@command("strip",
+ [
+ ('r', 'rev', [], _('strip specified revision (optional, '
+ 'can specify revisions without this '
+ 'option)'), _('REV')),
+ ('f', 'force', None, _('force removal of changesets, discard '
+ 'uncommitted changes (no backup)')),
+ ('b', 'backup', None, _('bundle only changesets with local revision'
+ ' number greater than REV which are not'
+ ' descendants of REV (DEPRECATED)')),
+ ('', 'no-backup', None, _('no backups')),
+ ('', 'nobackup', None, _('no backups (DEPRECATED)')),
+ ('n', '', None, _('ignored (DEPRECATED)')),
+ ('k', 'keep', None, _("do not modify working copy during strip")),
+ ('B', 'bookmark', '', _("remove revs only reachable from given"
+ " bookmark"))],
+ _('hg strip [-k] [-f] [-n] [-B bookmark] [-r] REV...'))
+def strip(ui, repo, *revs, **opts):
+ """strip changesets and all their descendants from the repository
+
+ The strip command removes the specified changesets and all their
+ descendants. If the working directory has uncommitted changes, the
+ operation is aborted unless the --force flag is supplied, in which
+ case changes will be discarded.
+
+ If a parent of the working directory is stripped, then the working
+ directory will automatically be updated to the most recent
+ available ancestor of the stripped parent after the operation
+ completes.
+
+ Any stripped changesets are stored in ``.hg/strip-backup`` as a
+ bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
+ be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
+ where BUNDLE is the bundle file created by the strip. Note that
+ the local revision numbers will in general be different after the
+ restore.
+
+ Use the --no-backup option to discard the backup bundle once the
+ operation completes.
+
+ Strip is not a history-rewriting operation and can be used on
+ changesets in the public phase. But if the stripped changesets
+ have been pushed to a remote repository, you will likely pull
+ them again.
+
+ Return 0 on success.
+ """
+ backup = 'all'
+ if opts.get('backup'):
+ backup = 'strip'
+ elif opts.get('no_backup') or opts.get('nobackup'):
+ backup = 'none'
+
+ cl = repo.changelog
+ revs = list(revs) + opts.get('rev')
+ revs = set(scmutil.revrange(repo, revs))
+
+ if opts.get('bookmark'):
+ mark = opts.get('bookmark')
+ marks = repo._bookmarks
+ if mark not in marks:
+ raise util.Abort(_("bookmark '%s' not found") % mark)
+
+ # If the requested bookmark is not the only one pointing to
+ # its revision, we have to delete only the bookmark and not strip
+ # anything. Revsets cannot detect that case.
+ uniquebm = True
+ for m, n in marks.iteritems():
+ if m != mark and n == repo[mark].node():
+ uniquebm = False
+ break
+ if uniquebm:
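+ # revisions reachable only from this bookmark: ancestors of the
+ # bookmark, minus ancestors of every other head and bookmark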
+ rsrevs = repo.revs("ancestors(bookmark(%s)) - "
+ "ancestors(head() and not bookmark(%s)) - "
+ "ancestors(bookmark() and not bookmark(%s))",
+ mark, mark, mark)
+ revs.update(set(rsrevs))
+ if not revs:
+ del marks[mark]
+ repo._writebookmarks(marks)
+ ui.write(_("bookmark '%s' deleted\n") % mark)
+
+ if not revs:
+ raise util.Abort(_('empty revision set'))
+
+ descendants = set(cl.descendants(revs))
+ strippedrevs = revs.union(descendants)
+ roots = revs.difference(descendants)
+
+ update = False
+ # if one of the wdir parents is stripped, we'll need
+ # to update away to an earlier revision
+ for p in repo.dirstate.parents():
+ if p != nullid and cl.rev(p) in strippedrevs:
+ update = True
+ break
+
+ rootnodes = set(cl.node(r) for r in roots)
+
+ q = repo.mq
+ if q.applied:
+ # refresh queue state if we're about to strip
+ # applied patches
+ if cl.rev(repo.lookup('qtip')) in strippedrevs:
+ q.applieddirty = True
+ start = 0
+ end = len(q.applied)
+ for i, statusentry in enumerate(q.applied):
+ if statusentry.node in rootnodes:
+ # if one of the stripped roots is an applied
+ # patch, only part of the queue is stripped
+ start = i
+ break
+ del q.applied[start:end]
+ q.savedirty()
+
+ revs = list(rootnodes)
+ if update and opts.get('keep'):
+ wlock = repo.wlock()
+ try:
+ urev = repo.mq.qparents(repo, revs[0])
+ repo.dirstate.rebuild(urev, repo[urev].manifest())
+ repo.dirstate.write()
+ update = False
+ finally:
+ wlock.release()
+
+ if opts.get('bookmark'):
+ del marks[mark]
+ repo._writebookmarks(marks)
+ ui.write(_("bookmark '%s' deleted\n") % mark)
+
+ repo.mq.strip(repo, revs, backup=backup, update=update,
+ force=opts.get('force'))
+
+ return 0
+
+@command("qselect",
+ [('n', 'none', None, _('disable all guards')),
+ ('s', 'series', None, _('list all guards in series file')),
+ ('', 'pop', None, _('pop to before first guarded applied patch')),
+ ('', 'reapply', None, _('pop, then reapply patches'))],
+ _('hg qselect [OPTION]... [GUARD]...'))
+def select(ui, repo, *args, **opts):
+ '''set or print guarded patches to push
+
+ Use the :hg:`qguard` command to set or print guards on a patch, then use
+ qselect to tell mq which guards to use. A patch will be pushed if
+ it has no guards or any positive guards match the currently
+ selected guard, but will not be pushed if any negative guards
+ match the current guard. For example::
+
+ qguard foo.patch -- -stable (negative guard)
+ qguard bar.patch +stable (positive guard)
+ qselect stable
+
+ This activates the "stable" guard. mq will skip foo.patch (because
+ it has a negative match) but push bar.patch (because it has a
+ positive match).
+
+ With no arguments, prints the currently active guards.
+ With one argument, sets the active guard.
+
+ Use -n/--none to deactivate guards (no other arguments needed).
+ When no guards are active, patches with positive guards are
+ skipped and patches with negative guards are pushed.
+
+ qselect can change the guards on applied patches. It does not pop
+ guarded patches by default. Use --pop to pop back to the last
+ applied patch that is not guarded. Use --reapply (which implies
+ --pop) to push back to the current patch afterwards, but skip
+ guarded patches.
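+
+ For example (patch names are illustrative): with guarded patch
+ ``a.patch`` (``-stable``) and unguarded ``b.patch`` both applied,
+ ``hg qselect stable --reapply`` pops both patches, then pushes
+ ``b.patch`` back while skipping ``a.patch``.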
+
+ Use -s/--series to print a list of all guards in the series file
+ (no other arguments needed). Use -v for more information.
+
+ Returns 0 on success.'''
+
+ q = repo.mq
+ guards = q.active()
+ if args or opts.get('none'):
+ old_unapplied = q.unapplied(repo)
+ old_guarded = [i for i in xrange(len(q.applied)) if
+ not q.pushable(i)[0]]
+ q.setactive(args)
+ q.savedirty()
+ if not args:
+ ui.status(_('guards deactivated\n'))
+ if not opts.get('pop') and not opts.get('reapply'):
+ unapplied = q.unapplied(repo)
+ guarded = [i for i in xrange(len(q.applied))
+ if not q.pushable(i)[0]]
+ if len(unapplied) != len(old_unapplied):
+ ui.status(_('number of unguarded, unapplied patches has '
+ 'changed from %d to %d\n') %
+ (len(old_unapplied), len(unapplied)))
+ if len(guarded) != len(old_guarded):
+ ui.status(_('number of guarded, applied patches has changed '
+ 'from %d to %d\n') %
+ (len(old_guarded), len(guarded)))
+ elif opts.get('series'):
+ guards = {}
+ noguards = 0
+ for gs in q.seriesguards:
+ if not gs:
+ noguards += 1
+ for g in gs:
+ guards.setdefault(g, 0)
+ guards[g] += 1
+ if ui.verbose:
+ guards['NONE'] = noguards
+ guards = guards.items()
+ guards.sort(key=lambda x: x[0][1:])
+ if guards:
+ ui.note(_('guards in series file:\n'))
+ for guard, count in guards:
+ ui.note('%2d ' % count)
+ ui.write(guard, '\n')
+ else:
+ ui.note(_('no guards in series file\n'))
+ else:
+ if guards:
+ ui.note(_('active guards:\n'))
+ for g in guards:
+ ui.write(g, '\n')
+ else:
+ ui.write(_('no active guards\n'))
+ reapply = opts.get('reapply') and q.applied and q.appliedname(-1)
+ popped = False
+ if opts.get('pop') or opts.get('reapply'):
+ for i in xrange(len(q.applied)):
+ pushable, reason = q.pushable(i)
+ if not pushable:
+ ui.status(_('popping guarded patches\n'))
+ popped = True
+ if i == 0:
+ q.pop(repo, all=True)
+ else:
+ q.pop(repo, str(i - 1))
+ break
+ if popped:
+ try:
+ if reapply:
+ ui.status(_('reapplying unguarded patches\n'))
+ q.push(repo, reapply)
+ finally:
+ q.savedirty()
+
+@command("qfinish",
+ [('a', 'applied', None, _('finish all applied changesets'))],
+ _('hg qfinish [-a] [REV]...'))
+def finish(ui, repo, *revrange, **opts):
+ """move applied patches into repository history
+
+ Finishes the specified revisions (corresponding to applied
+ patches) by moving them out of mq control into regular repository
+ history.
+
+ Accepts a revision range or the -a/--applied option. If --applied
+ is specified, all applied mq revisions are removed from mq
+ control. Otherwise, the given revisions must be at the base of the
+ stack of applied patches.
+
+ This can be especially useful if your changes have been applied to
+ an upstream repository, or if you are about to push your changes
+ to upstream.
+
+ Returns 0 on success.
+ """
+ if not opts.get('applied') and not revrange:
+ raise util.Abort(_('no revisions specified'))
+ elif opts.get('applied'):
+ revrange = ('qbase::qtip',) + revrange
+
+ q = repo.mq
+ if not q.applied:
+ ui.status(_('no patches applied\n'))
+ return 0
+
+ revs = scmutil.revrange(repo, revrange)
+ if repo['.'].rev() in revs and repo[None].files():
+ ui.warn(_('warning: uncommitted changes in the working directory\n'))
+ # queue.finish may change phases but leaves the responsibility to lock
+ # the repo to the caller to avoid deadlock with wlock. This command code
+ # is responsible for this locking.
+ lock = repo.lock()
+ try:
+ q.finish(repo, revs)
+ q.savedirty()
+ finally:
+ lock.release()
+ return 0
+
+@command("qqueue",
+ [('l', 'list', False, _('list all available queues')),
+ ('', 'active', False, _('print name of active queue')),
+ ('c', 'create', False, _('create new queue')),
+ ('', 'rename', False, _('rename active queue')),
+ ('', 'delete', False, _('delete reference to queue')),
+ ('', 'purge', False, _('delete queue, and remove patch dir')),
+ ],
+ _('[OPTION] [QUEUE]'))
+def qqueue(ui, repo, name=None, **opts):
+ '''manage multiple patch queues
+
+ Supports switching between different patch queues, as well as creating
+ new patch queues and deleting existing ones.
+
+ Omitting a queue name or specifying -l/--list will show you the registered
+ queues - by default the "normal" patches queue is registered. The currently
+ active queue will be marked with "(active)". Specifying --active will print
+ only the name of the active queue.
+
+ To create a new queue, use -c/--create. The queue is automatically made
+ active, except when there are applied patches from the currently
+ active queue in the repository. In that case the queue will only be
+ created, and switching to it will fail.
+
+ To delete an existing queue, use --delete. You cannot delete the currently
+ active queue.
+
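+ Examples (queue names are illustrative)::
+
+ hg qqueue --create refactoring # create a queue and make it active
+ hg qqueue patches # switch back to the default queue
+ hg qqueue --delete refactoring # delete the reference to the queue
+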
+ Returns 0 on success.
+ '''
+ q = repo.mq
+ _defaultqueue = 'patches'
+ _allqueues = 'patches.queues'
+ _activequeue = 'patches.queue'
+
+ def _getcurrent():
+ cur = os.path.basename(q.path)
+ if cur.startswith('patches-'):
+ cur = cur[8:]
+ return cur
+
+ def _noqueues():
+ try:
+ fh = repo.opener(_allqueues, 'r')
+ fh.close()
+ except IOError:
+ return True
+
+ return False
+
+ def _getqueues():
+ current = _getcurrent()
+
+ try:
+ fh = repo.opener(_allqueues, 'r')
+ queues = [queue.strip() for queue in fh if queue.strip()]
+ fh.close()
+ if current not in queues:
+ queues.append(current)
+ except IOError:
+ queues = [_defaultqueue]
+
+ return sorted(queues)
+
+ def _setactive(name):
+ if q.applied:
+ raise util.Abort(_('patches applied - cannot set new queue active'))
+ _setactivenocheck(name)
+
+ def _setactivenocheck(name):
+ fh = repo.opener(_activequeue, 'w')
+ if name != 'patches':
+ fh.write(name)
+ fh.close()
+
+ def _addqueue(name):
+ fh = repo.opener(_allqueues, 'a')
+ fh.write('%s\n' % (name,))
+ fh.close()
+
+ def _queuedir(name):
+ if name == 'patches':
+ return repo.join('patches')
+ else:
+ return repo.join('patches-' + name)
+
+ def _validname(name):
+ for n in name:
+ if n in ':\\/.':
+ return False
+ return True
+
+ def _delete(name):
+ if name not in existing:
+ raise util.Abort(_('cannot delete queue that does not exist'))
+
+ current = _getcurrent()
+
+ if name == current:
+ raise util.Abort(_('cannot delete currently active queue'))
+
+ fh = repo.opener('patches.queues.new', 'w')
+ for queue in existing:
+ if queue == name:
+ continue
+ fh.write('%s\n' % (queue,))
+ fh.close()
+ util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
+
+ if not name or opts.get('list') or opts.get('active'):
+ current = _getcurrent()
+ if opts.get('active'):
+ ui.write('%s\n' % (current,))
+ return
+ for queue in _getqueues():
+ ui.write('%s' % (queue,))
+ if queue == current and not ui.quiet:
+ ui.write(_(' (active)\n'))
+ else:
+ ui.write('\n')
+ return
+
+ if not _validname(name):
+ raise util.Abort(
+ _('invalid queue name, may not contain the characters ":\\/."'))
+
+ existing = _getqueues()
+
+ if opts.get('create'):
+ if name in existing:
+ raise util.Abort(_('queue "%s" already exists') % name)
+ if _noqueues():
+ _addqueue(_defaultqueue)
+ _addqueue(name)
+ _setactive(name)
+ elif opts.get('rename'):
+ current = _getcurrent()
+ if name == current:
+ raise util.Abort(_('can\'t rename "%s" to its current name') % name)
+ if name in existing:
+ raise util.Abort(_('queue "%s" already exists') % name)
+
+ olddir = _queuedir(current)
+ newdir = _queuedir(name)
+
+ if os.path.exists(newdir):
+ raise util.Abort(_('non-queue directory "%s" already exists') %
+ newdir)
+
+ fh = repo.opener('patches.queues.new', 'w')
+ for queue in existing:
+ if queue == current:
+ fh.write('%s\n' % (name,))
+ if os.path.exists(olddir):
+ util.rename(olddir, newdir)
+ else:
+ fh.write('%s\n' % (queue,))
+ fh.close()
+ util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
+ _setactivenocheck(name)
+ elif opts.get('delete'):
+ _delete(name)
+ elif opts.get('purge'):
+ if name in existing:
+ _delete(name)
+ qdir = _queuedir(name)
+ if os.path.exists(qdir):
+ shutil.rmtree(qdir)
+ else:
+ if name not in existing:
+ raise util.Abort(_('use --create to create a new queue'))
+ _setactive(name)
+
+def mqphasedefaults(repo, roots):
+ """callback used to set mq changeset as secret when no phase data exists"""
+ if repo.mq.applied:
+ if repo.ui.configbool('mq', 'secret', False):
+ mqphase = phases.secret
+ else:
+ mqphase = phases.draft
+ qbase = repo[repo.mq.applied[0].node]
+ roots[mqphase].add(qbase.node())
+ return roots
+
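+# Illustrative hgrc snippet for the ``mq.secret`` option consulted above;
+# when enabled, mq changesets default to the secret phase instead of draft:
+#
+# [mq]
+# secret = True
+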
+def reposetup(ui, repo):
+ class mqrepo(repo.__class__):
+ @util.propertycache
+ def mq(self):
+ return queue(self.ui, self.path)
+
+ def abortifwdirpatched(self, errmsg, force=False):
+ if self.mq.applied and not force:
+ parents = self.dirstate.parents()
+ patches = [s.node for s in self.mq.applied]
+ if parents[0] in patches or parents[1] in patches:
+ raise util.Abort(errmsg)
+
+ def commit(self, text="", user=None, date=None, match=None,
+ force=False, editor=False, extra={}):
+ self.abortifwdirpatched(
+ _('cannot commit over an applied mq patch'),
+ force)
+
+ return super(mqrepo, self).commit(text, user, date, match, force,
+ editor, extra)
+
+ def checkpush(self, force, revs):
+ if self.mq.applied and not force:
+ outapplied = [e.node for e in self.mq.applied]
+ if revs:
+ # Assume applied patches have no non-patch descendants and
+ # are not on the remote already. Filter out any changeset
+ # not being pushed.
+ heads = set(revs)
+ for node in reversed(outapplied):
+ if node in heads:
+ break
+ else:
+ outapplied.pop()
+ # looking for pushed and shared changeset
+ for node in outapplied:
+ if repo[node].phase() < phases.secret:
+ raise util.Abort(_('source has mq patches applied'))
+ # no non-secret patches pushed
+ super(mqrepo, self).checkpush(force, revs)
+
+ def _findtags(self):
+ '''augment tags from base class with patch tags'''
+ result = super(mqrepo, self)._findtags()
+
+ q = self.mq
+ if not q.applied:
+ return result
+
+ mqtags = [(patch.node, patch.name) for patch in q.applied]
+
+ try:
+ self.changelog.rev(mqtags[-1][0])
+ except error.LookupError:
+ self.ui.warn(_('mq status file refers to unknown node %s\n')
+ % short(mqtags[-1][0]))
+ return result
+
+ mqtags.append((mqtags[-1][0], 'qtip'))
+ mqtags.append((mqtags[0][0], 'qbase'))
+ mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
+ tags = result[0]
+ for patch in mqtags:
+ if patch[1] in tags:
+ self.ui.warn(_('tag %s overrides mq patch of the same '
+ 'name\n') % patch[1])
+ else:
+ tags[patch[1]] = patch[0]
+
+ return result
+
+ def _branchtags(self, partial, lrev):
+ q = self.mq
+ cl = self.changelog
+ qbase = None
+ if not q.applied:
+ if getattr(self, '_committingpatch', False):
+ # Committing a new patch, must be tip
+ qbase = len(cl) - 1
+ else:
+ qbasenode = q.applied[0].node
+ try:
+ qbase = cl.rev(qbasenode)
+ except error.LookupError:
+ self.ui.warn(_('mq status file refers to unknown node %s\n')
+ % short(qbasenode))
+ if qbase is None:
+ return super(mqrepo, self)._branchtags(partial, lrev)
+
+ start = lrev + 1
+ if start < qbase:
+ # update the cache (excluding the patches) and save it
+ ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
+ self._updatebranchcache(partial, ctxgen)
+ self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
+ start = qbase
+ # if start = qbase, the cache is as updated as it should be.
+ # if start > qbase, the cache includes (part of) the patches.
+ # we might as well use it, but we won't save it.
+
+ # update the cache up to the tip
+ ctxgen = (self[r] for r in xrange(start, len(cl)))
+ self._updatebranchcache(partial, ctxgen)
+
+ return partial
+
+ if repo.local():
+ repo.__class__ = mqrepo
+
+ repo._phasedefaults.append(mqphasedefaults)
+
+def mqimport(orig, ui, repo, *args, **kwargs):
+ if (util.safehasattr(repo, 'abortifwdirpatched')
+ and not kwargs.get('no_commit', False)):
+ repo.abortifwdirpatched(_('cannot import over an applied patch'),
+ kwargs.get('force'))
+ return orig(ui, repo, *args, **kwargs)
+
+def mqinit(orig, ui, *args, **kwargs):
+ mq = kwargs.pop('mq', None)
+
+ if not mq:
+ return orig(ui, *args, **kwargs)
+
+ if args:
+ repopath = args[0]
+ if not hg.islocal(repopath):
+ raise util.Abort(_('only a local queue repository '
+ 'may be initialized'))
+ else:
+ repopath = cmdutil.findrepo(os.getcwd())
+ if not repopath:
+ raise util.Abort(_('there is no Mercurial repository here '
+ '(.hg not found)'))
+ repo = hg.repository(ui, repopath)
+ return qinit(ui, repo, True)
+
+def mqcommand(orig, ui, repo, *args, **kwargs):
+ """Add --mq option to operate on patch repository instead of main"""
+
+ # some commands do not like getting unknown options
+ mq = kwargs.pop('mq', None)
+
+ if not mq:
+ return orig(ui, repo, *args, **kwargs)
+
+ q = repo.mq
+ r = q.qrepo()
+ if not r:
+ raise util.Abort(_('no queue repository'))
+ return orig(r.ui, r, *args, **kwargs)
+
+def summary(orig, ui, repo, *args, **kwargs):
+ r = orig(ui, repo, *args, **kwargs)
+ q = repo.mq
+ m = []
+ a, u = len(q.applied), len(q.unapplied(repo))
+ if a:
+ m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
+ if u:
+ m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
+ if m:
+ ui.write("mq: %s\n" % ', '.join(m))
+ else:
+ ui.note(_("mq: (empty queue)\n"))
+ return r
+
+def revsetmq(repo, subset, x):
+ """``mq()``
+ Changesets managed by MQ.
+ """
+ revset.getargs(x, 0, 0, _("mq takes no arguments"))
+ applied = set([repo[r.node].rev() for r in repo.mq.applied])
+ return [r for r in subset if r in applied]
+
+# tell hggettext to extract docstrings from these functions:
+i18nfunctions = [revsetmq]
+
+def extsetup(ui):
+ # Ensure mq wrappers are called first, regardless of extension load order,
+ # by NOT wrapping in uisetup() and instead deferring to init stage two here.
+ mqopt = [('', 'mq', None, _("operate on patch repository"))]
+
+ extensions.wrapcommand(commands.table, 'import', mqimport)
+ extensions.wrapcommand(commands.table, 'summary', summary)
+
+ entry = extensions.wrapcommand(commands.table, 'init', mqinit)
+ entry[1].extend(mqopt)
+
+ nowrap = set(commands.norepo.split(" "))
+
+ def dotable(cmdtable):
+ for cmd in cmdtable.keys():
+ cmd = cmdutil.parsealiases(cmd)[0]
+ if cmd in nowrap:
+ continue
+ entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
+ entry[1].extend(mqopt)
+
+ dotable(commands.table)
+
+ for extname, extmodule in extensions.extensions():
+ if extmodule.__file__ != __file__:
+ dotable(getattr(extmodule, 'cmdtable', {}))
+
+ revset.symbols['mq'] = revsetmq
+
+colortable = {'qguard.negative': 'red',
+ 'qguard.positive': 'yellow',
+ 'qguard.unguarded': 'green',
+ 'qseries.applied': 'blue bold underline',
+ 'qseries.guarded': 'black bold',
+ 'qseries.missing': 'red bold',
+ 'qseries.unapplied': 'black bold'}
diff --git a/hgext/notify.py b/hgext/notify.py
new file mode 100644
index 0000000..b0fbcee
--- /dev/null
+++ b/hgext/notify.py
@@ -0,0 +1,382 @@
+# notify.py - email notifications for mercurial
+#
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''hooks for sending email push notifications
+
+This extension implements hooks to send email notifications when
+changesets are sent from or received by the local repository.
+
+First, enable the extension as explained in :hg:`help extensions`, and
+register the hook you want to run. ``incoming`` and ``changegroup`` hooks
+are run when changesets are received, while ``outgoing`` hooks are for
+changesets sent to another repository::
+
+ [hooks]
+ # one email for each incoming changeset
+ incoming.notify = python:hgext.notify.hook
+ # one email for all incoming changesets
+ changegroup.notify = python:hgext.notify.hook
+
+ # one email for all outgoing changesets
+ outgoing.notify = python:hgext.notify.hook
+
+This registers the hooks. To enable notification, subscribers must
+be assigned to repositories. The ``[usersubs]`` section maps multiple
+repositories to a given recipient. The ``[reposubs]`` section maps
+multiple recipients to a single repository::
+
+ [usersubs]
+ # key is subscriber email, value is a comma-separated list of repo glob
+ # patterns
+ user@host = pattern
+
+ [reposubs]
+ # key is glob pattern, value is a comma-separated list of subscriber
+ # emails
+ pattern = user@host
+
+Glob patterns are matched against the absolute path of the repository
+root.
+
+In order to place them under direct user management, ``[usersubs]`` and
+``[reposubs]`` sections may be placed in a separate ``hgrc`` file and
+incorporated by reference::
+
+ [notify]
+ config = /path/to/subscriptionsfile
+
+Notifications will not be sent until the ``notify.test`` value is set
+to ``False``; see below.
+
+Notification content can be tweaked with the following configuration entries:
+
+notify.test
+ If ``True``, print messages to stdout instead of sending them. Default: True.
+
+notify.sources
+ Space-separated list of change sources. Notifications are activated only
+ when a changeset's source is in this list. Sources may be:
+
+ :``serve``: changesets received via http or ssh
+ :``pull``: changesets received via ``hg pull``
+ :``unbundle``: changesets received via ``hg unbundle``
+ :``push``: changesets sent or received via ``hg push``
+ :``bundle``: changesets sent via ``hg unbundle``
+
+ Default: serve.
+
+notify.strip
+ Number of leading slashes to strip from URL paths. By default, notifications
+ reference repositories with their absolute path. ``notify.strip`` lets you
+ turn them into relative paths. For example, ``notify.strip=3`` will change
+ ``/long/path/repository`` into ``repository``. Default: 0.
+
+notify.domain
+ Default email domain for sender or recipients with no explicit domain.
+
+notify.style
+ Style file to use when formatting emails.
+
+notify.template
+ Template to use when formatting emails.
+
+notify.incoming
+ Template to use when run as an incoming hook, overriding ``notify.template``.
+
+notify.outgoing
+ Template to use when run as an outgoing hook, overriding ``notify.template``.
+
+notify.changegroup
+ Template to use when running as a changegroup hook, overriding
+ ``notify.template``.
+
+notify.maxdiff
+ Maximum number of diff lines to include in notification email. Set to 0
+ to disable the diff, or -1 to include all of it. Default: 300.
+
+notify.maxsubject
+ Maximum number of characters in email's subject line. Default: 67.
+
+notify.diffstat
+ Set to True to include a diffstat before diff content. Default: True.
+
+notify.merge
+ If True, send notifications for merge changesets. Default: True.
+
+notify.mbox
+ If set, append mails to this mbox file instead of sending. Default: None.
+
+notify.fromauthor
+ If set, use the committer of the first changeset in a changegroup for
+ the "From" field of the notification mail. If not set, take the user
+ from the pushing repo. Default: False.
+
+If set, the following entries will also be used to customize the
+notifications:
+
+email.from
+ Email ``From`` address to use if none can be found in the generated
+ email content.
+
+web.baseurl
+ Root repository URL to combine with repository paths when making
+ references. See also ``notify.strip``.
+
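+A small end-to-end example; the repository pattern and address are
+illustrative::
+
+ [hooks]
+ changegroup.notify = python:hgext.notify.hook
+
+ [notify]
+ test = False
+
+ [reposubs]
+ /srv/hg/* = devteam@example.com
+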
+'''
+
+from mercurial.i18n import _
+from mercurial import patch, cmdutil, templater, util, mail
+import email.Parser, email.Errors, fnmatch, socket, time
+
+testedwith = 'internal'
+
+# template for single changeset can include email headers.
+single_template = '''
+Subject: changeset in {webroot}: {desc|firstline|strip}
+From: {author}
+
+changeset {node|short} in {root}
+details: {baseurl}{webroot}?cmd=changeset;node={node|short}
+description:
+\t{desc|tabindent|strip}
+'''.lstrip()
+
+# template for multiple changesets should not contain email headers,
+# because only first set of headers will be used and result will look
+# strange.
+multiple_template = '''
+changeset {node|short} in {root}
+details: {baseurl}{webroot}?cmd=changeset;node={node|short}
+summary: {desc|firstline}
+'''
+
+deftemplates = {
+ 'changegroup': multiple_template,
+}
+
+class notifier(object):
+ '''email notification class.'''
+
+ def __init__(self, ui, repo, hooktype):
+ self.ui = ui
+ cfg = self.ui.config('notify', 'config')
+ if cfg:
+ self.ui.readconfig(cfg, sections=['usersubs', 'reposubs'])
+ self.repo = repo
+ self.stripcount = int(self.ui.config('notify', 'strip', 0))
+ self.root = self.strip(self.repo.root)
+ self.domain = self.ui.config('notify', 'domain')
+ self.mbox = self.ui.config('notify', 'mbox')
+ self.test = self.ui.configbool('notify', 'test', True)
+ self.charsets = mail._charsets(self.ui)
+ self.subs = self.subscribers()
+ self.merge = self.ui.configbool('notify', 'merge', True)
+
+ mapfile = self.ui.config('notify', 'style')
+ template = (self.ui.config('notify', hooktype) or
+ self.ui.config('notify', 'template'))
+ self.t = cmdutil.changeset_templater(self.ui, self.repo,
+ False, None, mapfile, False)
+ if not mapfile and not template:
+ template = deftemplates.get(hooktype) or single_template
+ if template:
+ template = templater.parsestring(template, quoted=False)
+ self.t.use_template(template)
+
+ def strip(self, path):
+ '''strip leading slashes from local path, turn into web-safe path.'''
+
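+ # e.g. with stripcount=3, '/long/path/repo' becomes 'repo'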
+ path = util.pconvert(path)
+ count = self.stripcount
+ while count > 0:
+ c = path.find('/')
+ if c == -1:
+ break
+ path = path[c + 1:]
+ count -= 1
+ return path
+
+ def fixmail(self, addr):
+ '''try to clean up email addresses.'''
+
+ addr = util.email(addr.strip())
+ if self.domain:
+ a = addr.find('@localhost')
+ if a != -1:
+ addr = addr[:a]
+ if '@' not in addr:
+ return addr + '@' + self.domain
+ return addr
+
+ def subscribers(self):
+ '''return list of email addresses of subscribers to this repo.'''
+ subs = set()
+ for user, pats in self.ui.configitems('usersubs'):
+ for pat in pats.split(','):
+ if fnmatch.fnmatch(self.repo.root, pat.strip()):
+ subs.add(self.fixmail(user))
+ for pat, users in self.ui.configitems('reposubs'):
+ if fnmatch.fnmatch(self.repo.root, pat):
+ for user in users.split(','):
+ subs.add(self.fixmail(user))
+ return [mail.addressencode(self.ui, s, self.charsets, self.test)
+ for s in sorted(subs)]
+
+ def node(self, ctx, **props):
+ '''format one changeset, unless it is a suppressed merge.'''
+ if not self.merge and len(ctx.parents()) > 1:
+ return False
+ self.t.show(ctx, changes=ctx.changeset(),
+ baseurl=self.ui.config('web', 'baseurl'),
+ root=self.repo.root, webroot=self.root, **props)
+ return True
+
+ def skipsource(self, source):
+ '''true if incoming changes from this source should be skipped.'''
+ ok_sources = self.ui.config('notify', 'sources', 'serve').split()
+ return source not in ok_sources
+
+ def send(self, ctx, count, data):
+ '''send message.'''
+
+ p = email.Parser.Parser()
+ try:
+ msg = p.parsestr(data)
+ except email.Errors.MessageParseError, inst:
+ raise util.Abort(inst)
+
+ # store sender and subject
+ sender, subject = msg['From'], msg['Subject']
+ del msg['From'], msg['Subject']
+
+ if not msg.is_multipart():
+ # create fresh mime message from scratch
+ # (multipart templates must take care of this themselves)
+ headers = msg.items()
+ payload = msg.get_payload()
+ # for notification prefer readability over data precision
+ msg = mail.mimeencode(self.ui, payload, self.charsets, self.test)
+ # reinstate custom headers
+ for k, v in headers:
+ msg[k] = v
+
+ msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
+
+ # try to make subject line exist and be useful
+ if not subject:
+ if count > 1:
+ subject = _('%s: %d new changesets') % (self.root, count)
+ else:
+ s = ctx.description().lstrip().split('\n', 1)[0].rstrip()
+ subject = '%s: %s' % (self.root, s)
+ maxsubject = int(self.ui.config('notify', 'maxsubject', 67))
+ if maxsubject:
+ subject = util.ellipsis(subject, maxsubject)
+ msg['Subject'] = mail.headencode(self.ui, subject,
+ self.charsets, self.test)
+
+ # try to make message have proper sender
+ if not sender:
+ sender = self.ui.config('email', 'from') or self.ui.username()
+ if '@' not in sender or '@localhost' in sender:
+ sender = self.fixmail(sender)
+ msg['From'] = mail.addressencode(self.ui, sender,
+ self.charsets, self.test)
+
+ msg['X-Hg-Notification'] = 'changeset %s' % ctx
+ if not msg['Message-Id']:
+ msg['Message-Id'] = ('<hg.%s.%s.%s@%s>' %
+ (ctx, int(time.time()),
+ hash(self.repo.root), socket.getfqdn()))
+ msg['To'] = ', '.join(self.subs)
+
+ msgtext = msg.as_string()
+ if self.test:
+ self.ui.write(msgtext)
+ if not msgtext.endswith('\n'):
+ self.ui.write('\n')
+ else:
+ self.ui.status(_('notify: sending %d subscribers %d changes\n') %
+ (len(self.subs), count))
+ mail.sendmail(self.ui, util.email(msg['From']),
+ self.subs, msgtext, mbox=self.mbox)
+
+ def diff(self, ctx, ref=None):
+
+ maxdiff = int(self.ui.config('notify', 'maxdiff', 300))
+ prev = ctx.p1().node()
+ ref = ref and ref.node() or ctx.node()
+ chunks = patch.diff(self.repo, prev, ref, opts=patch.diffopts(self.ui))
+ difflines = ''.join(chunks).splitlines()
+
+ if self.ui.configbool('notify', 'diffstat', True):
+ s = patch.diffstat(difflines)
+ # s may be empty; don't include the header if it is
+ if s:
+ self.ui.write('\ndiffstat:\n\n%s' % s)
+
+ if maxdiff == 0:
+ return
+ elif maxdiff > 0 and len(difflines) > maxdiff:
+ msg = _('\ndiffs (truncated from %d to %d lines):\n\n')
+ self.ui.write(msg % (len(difflines), maxdiff))
+ difflines = difflines[:maxdiff]
+ elif difflines:
+ self.ui.write(_('\ndiffs (%d lines):\n\n') % len(difflines))
+
+ self.ui.write("\n".join(difflines))
+
+def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
+ '''send email notifications to interested subscribers.
+
+ if used as a changegroup hook, send one email for all changesets in
+ the changegroup; otherwise send one email per changeset.'''
+
+ n = notifier(ui, repo, hooktype)
+ ctx = repo[node]
+
+ if not n.subs:
+ ui.debug('notify: no subscribers to repository %s\n' % n.root)
+ return
+ if n.skipsource(source):
+ ui.debug('notify: changes have source "%s" - skipping\n' % source)
+ return
+
+ ui.pushbuffer()
+ data = ''
+ count = 0
+ author = ''
+ if hooktype == 'changegroup' or hooktype == 'outgoing':
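+ # the hook receives the first changeset of the group; everything
+ # from that revision through tip is assumed to be part of it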
+ start, end = ctx.rev(), len(repo)
+ for rev in xrange(start, end):
+ if n.node(repo[rev]):
+ count += 1
+ if not author:
+ author = repo[rev].user()
+ else:
+ data += ui.popbuffer()
+ ui.note(_('notify: suppressing notification for merge %d:%s\n')
+ % (rev, repo[rev].hex()[:12]))
+ ui.pushbuffer()
+ if count:
+ n.diff(ctx, repo['tip'])
+ else:
+ if not n.node(ctx):
+ ui.popbuffer()
+ ui.note(_('notify: suppressing notification for merge %d:%s\n') %
+ (ctx.rev(), ctx.hex()[:12]))
+ return
+ count += 1
+ n.diff(ctx)
+
+ data += ui.popbuffer()
+ fromauthor = ui.config('notify', 'fromauthor')
+ if author and fromauthor:
+ data = '\n'.join(['From: %s' % author, data])
+
+ if count:
+ n.send(ctx, count, data)
diff --git a/hgext/pager.py b/hgext/pager.py
new file mode 100644
index 0000000..ae430ef
--- /dev/null
+++ b/hgext/pager.py
@@ -0,0 +1,140 @@
+# pager.py - display output using a pager
+#
+# Copyright 2008 David Soria Parra <dsp@php.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+#
+# To load the extension, add it to your configuration file:
+#
+# [extensions]
+# pager =
+#
+# Run "hg help pager" to get info on configuration.
+
+'''browse command output with an external pager
+
+To set the pager that should be used, set the pager.pager configuration
+option::
+
+ [pager]
+ pager = less -FRX
+
+If no pager is set, the pager extension uses the environment variable
+$PAGER. If neither pager.pager nor $PAGER is set, no pager is used.
+
+You can disable the pager for certain commands by adding them to the
+pager.ignore list::
+
+ [pager]
+ ignore = version, help, update
+
+You can also enable the pager only for certain commands using
+pager.attend. Below is the default list of commands to be paged::
+
+ [pager]
+ attend = annotate, cat, diff, export, glog, log, qdiff
+
+Setting pager.attend to an empty value will cause all commands to be
+paged.
+
+If pager.attend is present, pager.ignore will be ignored.
+
+To ignore global commands like :hg:`version` or :hg:`help`, you have
+to specify them in your user configuration file.
+
+The --pager=... option can also be used to control when the pager is
+used. Use a boolean value like yes, no, on, off, or use auto for
+normal behavior.
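+
+For example (illustrative)::
+
+ hg log --pager=no # suppress the pager for this invocation
+ hg log --pager=yes # force the pager on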
+'''
+
+import atexit, sys, os, signal, subprocess, errno, shlex
+from mercurial import commands, dispatch, util, extensions
+from mercurial.i18n import _
+
+testedwith = 'internal'
+
+def _pagerfork(ui, p):
+ if not util.safehasattr(os, 'fork'):
+ sys.stdout = util.popen(p, 'wb')
+ if ui._isatty(sys.stderr):
+ sys.stderr = sys.stdout
+ return
+ fdin, fdout = os.pipe()
+ pid = os.fork()
+ if pid == 0:
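+ # child: keep running hg, with stdout (and stderr when it is a
+ # tty) redirected into the pipe read by the pager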
+ os.close(fdin)
+ os.dup2(fdout, sys.stdout.fileno())
+ if ui._isatty(sys.stderr):
+ os.dup2(fdout, sys.stderr.fileno())
+ os.close(fdout)
+ return
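+ # parent: replace this process with the pager, reading hg's output
+ # from the pipe on stdin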
+ os.dup2(fdin, sys.stdin.fileno())
+ os.close(fdin)
+ os.close(fdout)
+ try:
+ os.execvp('/bin/sh', ['/bin/sh', '-c', p])
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ # no /bin/sh, try executing the pager directly
+ args = shlex.split(p)
+ os.execvp(args[0], args)
+ else:
+ raise
+
+def _pagersubprocess(ui, p):
+ pager = subprocess.Popen(p, shell=True, bufsize=-1,
+ close_fds=util.closefds, stdin=subprocess.PIPE,
+ stdout=sys.stdout, stderr=sys.stderr)
+
+ stdout = os.dup(sys.stdout.fileno())
+ stderr = os.dup(sys.stderr.fileno())
+ os.dup2(pager.stdin.fileno(), sys.stdout.fileno())
+ if ui._isatty(sys.stderr):
+ os.dup2(pager.stdin.fileno(), sys.stderr.fileno())
+
+ @atexit.register
+ def killpager():
+ pager.stdin.close()
+ os.dup2(stdout, sys.stdout.fileno())
+ os.dup2(stderr, sys.stderr.fileno())
+ pager.wait()
+
+def _runpager(ui, p):
+ # The subprocess module shipped with Python <= 2.4 is buggy (issue3533).
+ # The compat version is buggy on Windows (issue3225), but has been shipping
+ # with hg for a long time. Preserve existing functionality.
+ if sys.version_info >= (2, 5):
+ _pagersubprocess(ui, p)
+ else:
+ _pagerfork(ui, p)
+
+def uisetup(ui):
+ if '--debugger' in sys.argv or not ui.formatted():
+ return
+
+ def pagecmd(orig, ui, options, cmd, cmdfunc):
+ p = ui.config("pager", "pager", os.environ.get("PAGER"))
+
+ if p:
+ attend = ui.configlist('pager', 'attend', attended)
+ auto = options['pager'] == 'auto'
+ always = util.parsebool(options['pager'])
+ if (always or auto and
+ (cmd in attend or
+ (cmd not in ui.configlist('pager', 'ignore') and not attend))):
+ ui.setconfig('ui', 'formatted', ui.formatted())
+ ui.setconfig('ui', 'interactive', False)
+ if util.safehasattr(signal, "SIGPIPE"):
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+ _runpager(ui, p)
+ return orig(ui, options, cmd, cmdfunc)
+
+ extensions.wrapfunction(dispatch, '_runcommand', pagecmd)
+
+def extsetup(ui):
+ commands.globalopts.append(
+ ('', 'pager', 'auto',
+ _("when to paginate (boolean, always, auto, or never)"),
+ _('TYPE')))
+
+attended = ['annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff']
diff --git a/hgext/patchbomb.py b/hgext/patchbomb.py
new file mode 100644
index 0000000..7ac8e27
--- /dev/null
+++ b/hgext/patchbomb.py
@@ -0,0 +1,558 @@
+# patchbomb.py - sending Mercurial changesets as patch emails
+#
+# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''command to send changesets as (a series of) patch emails
+
+The series is started off with a "[PATCH 0 of N]" introduction, which
+describes the series as a whole.
+
+Each patch email has a Subject line of "[PATCH M of N] ...", using the
+first line of the changeset description as the subject text. The
+message contains two or three body parts:
+
+- The changeset description.
+- [Optional] The result of running diffstat on the patch.
+- The patch itself, as generated by :hg:`export`.
+
+Each message refers to the first in the series using the In-Reply-To
+and References headers, so they will show up as a sequence in threaded
+mail and news readers, and in mail archives.
+
+To configure other defaults, add a section like this to your
+configuration file::
+
+ [email]
+ from = My Name <my@email>
+ to = recipient1, recipient2, ...
+ cc = cc1, cc2, ...
+ bcc = bcc1, bcc2, ...
+ reply-to = address1, address2, ...
+
+Use ``[patchbomb]`` as configuration section name if you need to
+override global ``[email]`` address settings.
+
+Then you can use the :hg:`email` command to mail a series of
+changesets as a patchbomb.
+
+You can also either configure the method option in the email section
+to be a sendmail compatible mailer or fill out the [smtp] section so
+that the patchbomb extension can automatically send patchbombs
+directly from the commandline. See the [email] and [smtp] sections in
+hgrc(5) for details.
+'''
+
+import os, errno, socket, tempfile, cStringIO
+import email.MIMEMultipart, email.MIMEBase
+import email.Utils, email.Encoders, email.Generator
+from mercurial import cmdutil, commands, hg, mail, patch, util
+from mercurial import scmutil
+from mercurial.i18n import _
+from mercurial.node import bin
+
+cmdtable = {}
+command = cmdutil.command(cmdtable)
+testedwith = 'internal'
+
+def prompt(ui, prompt, default=None, rest=':'):
+ if default:
+ prompt += ' [%s]' % default
+ return ui.prompt(prompt + rest, default)
+
+def introwanted(opts, number):
+ '''is an introductory message apparently wanted?'''
+ return number > 1 or opts.get('intro') or opts.get('desc')
+
+def makepatch(ui, repo, patchlines, opts, _charsets, idx, total, numbered,
+ patchname=None):
+
+ desc = []
+ node = None
+ body = ''
+
+ for line in patchlines:
+ if line.startswith('#'):
+ if line.startswith('# Node ID'):
+ node = line.split()[-1]
+ continue
+ if line.startswith('diff -r') or line.startswith('diff --git'):
+ break
+ desc.append(line)
+
+ if not patchname and not node:
+ raise ValueError
+
+ if opts.get('attach') and not opts.get('body'):
+ body = ('\n'.join(desc[1:]).strip() or
+ 'Patch subject is complete summary.')
+ body += '\n\n\n'
+
+ if opts.get('plain'):
+ while patchlines and patchlines[0].startswith('# '):
+ patchlines.pop(0)
+ if patchlines:
+ patchlines.pop(0)
+ while patchlines and not patchlines[0].strip():
+ patchlines.pop(0)
+
+ ds = patch.diffstat(patchlines, git=opts.get('git'))
+ if opts.get('diffstat'):
+ body += ds + '\n\n'
+
+ addattachment = opts.get('attach') or opts.get('inline')
+ if not addattachment or opts.get('body'):
+ body += '\n'.join(patchlines)
+
+ if addattachment:
+ msg = email.MIMEMultipart.MIMEMultipart()
+ if body:
+ msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
+ p = mail.mimetextpatch('\n'.join(patchlines), 'x-patch',
+ opts.get('test'))
+ binnode = bin(node)
+ # if node is mq patch, it will have the patch file's name as a tag
+ if not patchname:
+ patchtags = [t for t in repo.nodetags(binnode)
+ if t.endswith('.patch') or t.endswith('.diff')]
+ if patchtags:
+ patchname = patchtags[0]
+ elif total > 1:
+ patchname = cmdutil.makefilename(repo, '%b-%n.patch',
+ binnode, seqno=idx,
+ total=total)
+ else:
+ patchname = cmdutil.makefilename(repo, '%b.patch', binnode)
+ disposition = 'inline'
+ if opts.get('attach'):
+ disposition = 'attachment'
+ p['Content-Disposition'] = disposition + '; filename=' + patchname
+ msg.attach(p)
+ else:
+ msg = mail.mimetextpatch(body, display=opts.get('test'))
+
+ flag = ' '.join(opts.get('flag'))
+ if flag:
+ flag = ' ' + flag
+
+ subj = desc[0].strip().rstrip('. ')
+ if not numbered:
+ subj = '[PATCH%s] %s' % (flag, opts.get('subject') or subj)
+ else:
+ tlen = len(str(total))
+ subj = '[PATCH %0*d of %d%s] %s' % (tlen, idx, total, flag, subj)
+ msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
+ msg['X-Mercurial-Node'] = node
+ return msg, subj, ds
+
+emailopts = [
+ ('', 'body', None, _('send patches as inline message text (default)')),
+ ('a', 'attach', None, _('send patches as attachments')),
+ ('i', 'inline', None, _('send patches as inline attachments')),
+ ('', 'bcc', [], _('email addresses of blind carbon copy recipients')),
+ ('c', 'cc', [], _('email addresses of copy recipients')),
+ ('', 'confirm', None, _('ask for confirmation before sending')),
+ ('d', 'diffstat', None, _('add diffstat output to messages')),
+ ('', 'date', '', _('use the given date as the sending date')),
+ ('', 'desc', '', _('use the given file as the series description')),
+ ('f', 'from', '', _('email address of sender')),
+ ('n', 'test', None, _('print messages that would be sent')),
+ ('m', 'mbox', '', _('write messages to mbox file instead of sending them')),
+ ('', 'reply-to', [], _('email addresses replies should be sent to')),
+ ('s', 'subject', '', _('subject of first message (intro or single patch)')),
+ ('', 'in-reply-to', '', _('message identifier to reply to')),
+ ('', 'flag', [], _('flags to add in subject prefixes')),
+ ('t', 'to', [], _('email addresses of recipients'))]
+
+@command('email',
+ [('g', 'git', None, _('use git extended diff format')),
+ ('', 'plain', None, _('omit hg patch header')),
+ ('o', 'outgoing', None,
+ _('send changes not found in the target repository')),
+ ('b', 'bundle', None, _('send changes not in target as a binary bundle')),
+ ('', 'bundlename', 'bundle',
+ _('name of the bundle attachment file'), _('NAME')),
+ ('r', 'rev', [], _('a revision to send'), _('REV')),
+ ('', 'force', None, _('run even when remote repository is unrelated '
+ '(with -b/--bundle)')),
+ ('', 'base', [], _('a base changeset to specify instead of a destination '
+ '(with -b/--bundle)'), _('REV')),
+ ('', 'intro', None, _('send an introduction email for a single patch')),
+ ] + emailopts + commands.remoteopts,
+ _('hg email [OPTION]... [DEST]...'))
+def patchbomb(ui, repo, *revs, **opts):
+ '''send changesets by email
+
+ By default, diffs are sent in the format generated by
+ :hg:`export`, one per message. The series starts with a "[PATCH 0
+ of N]" introduction, which describes the series as a whole.
+
+ Each patch email has a Subject line of "[PATCH M of N] ...", using
+ the first line of the changeset description as the subject text.
+ The message contains two or three parts. First, the changeset
+ description.
+
+ With the -d/--diffstat option, if the diffstat program is
+ installed, the result of running diffstat on the patch is inserted.
+
+ Finally, the patch itself, as generated by :hg:`export`.
+
+ With the -d/--diffstat or -c/--confirm options, you will be presented
+ with a final summary of all messages and asked for confirmation before
+ the messages are sent.
+
+ By default the patch is included as text in the email body for
+ easy reviewing. Using the -a/--attach option will instead create
+ an attachment for the patch. With -i/--inline an inline attachment
+ will be created. You can include a patch both as text in the email
+ body and as a regular or an inline attachment by combining the
+ -a/--attach or -i/--inline with the --body option.
+
+ With -o/--outgoing, emails will be generated for patches not found
+ in the destination repository (or only those which are ancestors
+ of the specified revisions, if any are provided).
+
+ With -b/--bundle, changesets are selected as for --outgoing, but a
+ single email containing a binary Mercurial bundle as an attachment
+ will be sent.
+
+ With -m/--mbox, instead of previewing each patchbomb message in a
+ pager or sending the messages directly, it will create a UNIX
+ mailbox file with the patch emails. This mailbox file can be
+ previewed with any mail user agent which supports UNIX mbox
+ files.
+
+ With -n/--test, all steps will run, but mail will not be sent.
+ You will be prompted for an email recipient address, a subject and
+ an introductory message describing the patches of your patchbomb.
+ Then when all is done, patchbomb messages are displayed. If the
+ PAGER environment variable is set, your pager will be fired up once
+ for each patchbomb message, so you can verify everything is alright.
+
+ In case email sending fails, you will find a backup of your series
+ introductory message in ``.hg/last-email.txt``.
+
+ Examples::
+
+ hg email -r 3000 # send patch 3000 only
+ hg email -r 3000 -r 3001 # send patches 3000 and 3001
+ hg email -r 3000:3005 # send patches 3000 through 3005
+ hg email 3000 # send patch 3000 (deprecated)
+
+ hg email -o # send all patches not in default
+ hg email -o DEST # send all patches not in DEST
+ hg email -o -r 3000 # send all ancestors of 3000 not in default
+ hg email -o -r 3000 DEST # send all ancestors of 3000 not in DEST
+
+ hg email -b # send bundle of all patches not in default
+ hg email -b DEST # send bundle of all patches not in DEST
+ hg email -b -r 3000 # bundle of all ancestors of 3000 not in default
+ hg email -b -r 3000 DEST # bundle of all ancestors of 3000 not in DEST
+
+ hg email -o -m mbox && # generate an mbox file...
+ mutt -R -f mbox # ... and view it with mutt
+ hg email -o -m mbox && # generate an mbox file ...
+ formail -s sendmail \\ # ... and use formail to send from the mbox
+ -bm -t < mbox # ... using sendmail
+
+ Before using this command, you will need to enable email in your
+ hgrc. See the [email] section in hgrc(5) for details.
+ '''
+
+ _charsets = mail._charsets(ui)
+
+ bundle = opts.get('bundle')
+ date = opts.get('date')
+ mbox = opts.get('mbox')
+ outgoing = opts.get('outgoing')
+ rev = opts.get('rev')
+ # internal option used by pbranches
+ patches = opts.get('patches')
+
+ def getoutgoing(dest, revs):
+ '''Return the revisions present locally but not in dest'''
+ url = ui.expandpath(dest or 'default-push', dest or 'default')
+ url = hg.parseurl(url)[0]
+ ui.status(_('comparing with %s\n') % util.hidepassword(url))
+
+ revs = [r for r in scmutil.revrange(repo, revs) if r >= 0]
+ if not revs:
+ revs = [len(repo) - 1]
+ revs = repo.revs('outgoing(%s) and ::%ld', dest or '', revs)
+ if not revs:
+ ui.status(_("no changes found\n"))
+ return []
+ return [str(r) for r in revs]
+
+ def getpatches(revs):
+ for r in scmutil.revrange(repo, revs):
+ output = cStringIO.StringIO()
+ cmdutil.export(repo, [r], fp=output,
+ opts=patch.diffopts(ui, opts))
+ yield output.getvalue().split('\n')
+
+ def getbundle(dest):
+ tmpdir = tempfile.mkdtemp(prefix='hg-email-bundle-')
+ tmpfn = os.path.join(tmpdir, 'bundle')
+ try:
+ commands.bundle(ui, repo, tmpfn, dest, **opts)
+ fp = open(tmpfn, 'rb')
+ data = fp.read()
+ fp.close()
+ return data
+ finally:
+ try:
+ os.unlink(tmpfn)
+ except OSError:
+ pass
+ os.rmdir(tmpdir)
+
+ if not (opts.get('test') or mbox):
+ # really sending
+ mail.validateconfig(ui)
+
+ if not (revs or rev or outgoing or bundle or patches):
+ raise util.Abort(_('specify at least one changeset with -r or -o'))
+
+ if outgoing and bundle:
+ raise util.Abort(_("--outgoing mode always on with --bundle;"
+ " do not re-specify --outgoing"))
+
+ if outgoing or bundle:
+ if len(revs) > 1:
+ raise util.Abort(_("too many destinations"))
+ dest = revs and revs[0] or None
+ revs = []
+
+ if rev:
+ if revs:
+ raise util.Abort(_('use only one form to specify the revision'))
+ revs = rev
+
+ if outgoing:
+ revs = getoutgoing(dest, rev)
+ if bundle:
+ opts['revs'] = revs
+
+ # start
+ if date:
+ start_time = util.parsedate(date)
+ else:
+ start_time = util.makedate()
+
+ def genmsgid(id):
+ return '<%s.%s@%s>' % (id[:20], int(start_time[0]), socket.getfqdn())
+
+ def getdescription(body, sender):
+ if opts.get('desc'):
+ body = open(opts.get('desc')).read()
+ else:
+ ui.write(_('\nWrite the introductory message for the '
+ 'patch series.\n\n'))
+ body = ui.edit(body, sender)
+ # Save series description in case sendmail fails
+ msgfile = repo.opener('last-email.txt', 'wb')
+ msgfile.write(body)
+ msgfile.close()
+ return body
+
+ def getpatchmsgs(patches, patchnames=None):
+ msgs = []
+
+ ui.write(_('this patch series consists of %d patches.\n\n')
+ % len(patches))
+
+ # build the intro message, or skip it if the user declines
+ if introwanted(opts, len(patches)):
+ msg = makeintro(patches)
+ if msg:
+ msgs.append(msg)
+
+ # are we going to send more than one message?
+ numbered = len(msgs) + len(patches) > 1
+
+ # now generate the actual patch messages
+ name = None
+ for i, p in enumerate(patches):
+ if patchnames:
+ name = patchnames[i]
+ msg = makepatch(ui, repo, p, opts, _charsets, i + 1,
+ len(patches), numbered, name)
+ msgs.append(msg)
+
+ return msgs
+
+ def makeintro(patches):
+ tlen = len(str(len(patches)))
+
+ flag = opts.get('flag') or ''
+ if flag:
+ flag = ' ' + ' '.join(flag)
+ prefix = '[PATCH %0*d of %d%s]' % (tlen, 0, len(patches), flag)
+
+ subj = (opts.get('subject') or
+ prompt(ui, '(optional) Subject: ', rest=prefix, default=''))
+ if not subj:
+ return None # skip intro if the user doesn't bother
+
+ subj = prefix + ' ' + subj
+
+ body = ''
+ if opts.get('diffstat'):
+ # generate a cumulative diffstat of the whole patch series
+ diffstat = patch.diffstat(sum(patches, []))
+ body = '\n' + diffstat
+ else:
+ diffstat = None
+
+ body = getdescription(body, sender)
+ msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
+ msg['Subject'] = mail.headencode(ui, subj, _charsets,
+ opts.get('test'))
+ return (msg, subj, diffstat)
+
+ def getbundlemsgs(bundle):
+ subj = (opts.get('subject')
+ or prompt(ui, 'Subject:', 'A bundle for your repository'))
+
+ body = getdescription('', sender)
+ msg = email.MIMEMultipart.MIMEMultipart()
+ if body:
+ msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
+ datapart = email.MIMEBase.MIMEBase('application', 'x-mercurial-bundle')
+ datapart.set_payload(bundle)
+ bundlename = '%s.hg' % opts.get('bundlename', 'bundle')
+ datapart.add_header('Content-Disposition', 'attachment',
+ filename=bundlename)
+ email.Encoders.encode_base64(datapart)
+ msg.attach(datapart)
+ msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
+ return [(msg, subj, None)]
+
+ sender = (opts.get('from') or ui.config('email', 'from') or
+ ui.config('patchbomb', 'from') or
+ prompt(ui, 'From', ui.username()))
+
+ if patches:
+ msgs = getpatchmsgs(patches, opts.get('patchnames'))
+ elif bundle:
+ msgs = getbundlemsgs(getbundle(dest))
+ else:
+ msgs = getpatchmsgs(list(getpatches(revs)))
+
+ showaddrs = []
+
+ def getaddrs(header, ask=False, default=None):
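+ # precedence: command-line option, then [email]/[patchbomb]
+ # configuration, then (when ask is set) an interactive prompt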
+ configkey = header.lower()
+ opt = header.replace('-', '_').lower()
+ addrs = opts.get(opt)
+ if addrs:
+ showaddrs.append('%s: %s' % (header, ', '.join(addrs)))
+ return mail.addrlistencode(ui, addrs, _charsets, opts.get('test'))
+
+ # not on the command line: fallback to config and then maybe ask
+ addr = (ui.config('email', configkey) or
+ ui.config('patchbomb', configkey) or
+ '')
+ if not addr and ask:
+ addr = prompt(ui, header, default=default)
+ if addr:
+ showaddrs.append('%s: %s' % (header, addr))
+ return mail.addrlistencode(ui, [addr], _charsets, opts.get('test'))
+ else:
+ return default
+
+ to = getaddrs('To', ask=True)
+ if not to:
+ # we can get here in non-interactive mode
+ raise util.Abort(_('no recipient addresses provided'))
+ cc = getaddrs('Cc', ask=True, default='') or []
+ bcc = getaddrs('Bcc') or []
+ replyto = getaddrs('Reply-To')
+
+ if opts.get('diffstat') or opts.get('confirm'):
+ ui.write(_('\nFinal summary:\n\n'))
+ ui.write('From: %s\n' % sender)
+ for addr in showaddrs:
+ ui.write('%s\n' % addr)
+ for m, subj, ds in msgs:
+ ui.write('Subject: %s\n' % subj)
+ if ds:
+ ui.write(ds)
+ ui.write('\n')
+ if ui.promptchoice(_('are you sure you want to send (yn)?'),
+ (_('&Yes'), _('&No'))):
+ raise util.Abort(_('patchbomb canceled'))
+
+ ui.write('\n')
+
+ parent = opts.get('in_reply_to') or None
+ # angle brackets may be omitted, they're not semantically part of the msg-id
+ if parent is not None:
+ if not parent.startswith('<'):
+ parent = '<' + parent
+ if not parent.endswith('>'):
+ parent += '>'
+
+ first = True
+
+ sender_addr = email.Utils.parseaddr(sender)[1]
+ sender = mail.addressencode(ui, sender, _charsets, opts.get('test'))
+ sendmail = None
+ for i, (m, subj, ds) in enumerate(msgs):
+ try:
+ m['Message-Id'] = genmsgid(m['X-Mercurial-Node'])
+ except TypeError:
+ m['Message-Id'] = genmsgid('patchbomb')
+ if parent:
+ m['In-Reply-To'] = parent
+ m['References'] = parent
+ if first:
+ parent = m['Message-Id']
+ first = False
+
+ m['User-Agent'] = 'Mercurial-patchbomb/%s' % util.version()
+ m['Date'] = email.Utils.formatdate(start_time[0], localtime=True)
+
+ start_time = (start_time[0] + 1, start_time[1])
+ m['From'] = sender
+ m['To'] = ', '.join(to)
+ if cc:
+ m['Cc'] = ', '.join(cc)
+ if bcc:
+ m['Bcc'] = ', '.join(bcc)
+ if replyto:
+ m['Reply-To'] = ', '.join(replyto)
+ if opts.get('test'):
+ ui.status(_('displaying '), subj, ' ...\n')
+ ui.flush()
+ if 'PAGER' in os.environ and not ui.plain():
+ fp = util.popen(os.environ['PAGER'], 'w')
+ else:
+ fp = ui
+ generator = email.Generator.Generator(fp, mangle_from_=False)
+ try:
+ generator.flatten(m, 0)
+ fp.write('\n')
+ except IOError, inst:
+ if inst.errno != errno.EPIPE:
+ raise
+ if fp is not ui:
+ fp.close()
+ else:
+ if not sendmail:
+ sendmail = mail.connect(ui, mbox=mbox)
+ ui.status(_('sending '), subj, ' ...\n')
+ ui.progress(_('sending'), i, item=subj, total=len(msgs))
+ if not mbox:
+ # Exim does not remove the Bcc field
+ del m['Bcc']
+ fp = cStringIO.StringIO()
+ generator = email.Generator.Generator(fp, mangle_from_=False)
+ generator.flatten(m, 0)
+ sendmail(sender_addr, to + bcc + cc, fp.getvalue())
+
+ ui.progress(_('writing'), None)
+ ui.progress(_('sending'), None)
diff --git a/hgext/progress.py b/hgext/progress.py
new file mode 100644
index 0000000..3cc3747
--- /dev/null
+++ b/hgext/progress.py
@@ -0,0 +1,295 @@
+# progress.py - show progress bars for some actions
+#
+# Copyright (C) 2010 Augie Fackler <durin42@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""show progress bars for some actions
+
+This extension uses the progress information logged by hg commands
+to draw progress bars that are as informative as possible. Some progress
+bars only offer indeterminate information, while others have a definite
+end point.
+
+The following settings are available::
+
+ [progress]
+ delay = 3 # number of seconds (float) before showing the progress bar
+ changedelay = 1 # changedelay: minimum delay before showing a new topic.
+ # If set to less than 3 * refresh, 3 * refresh
+ # will be used instead.
+ refresh = 0.1 # time in seconds between refreshes of the progress bar
+ format = topic bar number estimate # format of the progress bar
+ width = <none> # if set, the maximum width of the progress information
+ # (that is, min(width, term width) will be used)
+ clear-complete = True # clear the progress bar after it's done
+ disable = False # if true, don't show a progress bar
+ assume-tty = False # if true, ALWAYS show a progress bar, unless
+ # disable is given
+
+Valid entries for the format field are topic, bar, number, unit,
+estimate, speed, and item. item defaults to the last 20 characters of
+the item, but this can be changed by adding either ``-<num>`` which
+would take the last num characters, or ``+<num>`` for the first num
+characters.
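+
+For example, ``item-40`` (illustrative) shows the last 40 characters of
+the current item::
+
+ [progress]
+ format = topic bar number item-40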
+"""
+
+import sys
+import time
+
+from mercurial.i18n import _
+testedwith = 'internal'
+
+def spacejoin(*args):
+ return ' '.join(s for s in args if s)
+
+def shouldprint(ui):
+ return ui._isatty(sys.stderr) or ui.configbool('progress', 'assume-tty')
+
+def fmtremaining(seconds):
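+ # e.g. 90 -> "1m30s"; past each unit boundary the smaller unit is
+ # dropped and the next one rounded up, so 7200 -> "2h01m"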
+ if seconds < 60:
+ # i18n: format XX seconds as "XXs"
+ return _("%02ds") % (seconds)
+ minutes = seconds // 60
+ if minutes < 60:
+ seconds -= minutes * 60
+ # i18n: format X minutes and YY seconds as "XmYYs"
+ return _("%dm%02ds") % (minutes, seconds)
+ # we're going to ignore seconds in this case
+ minutes += 1
+ hours = minutes // 60
+ minutes -= hours * 60
+ if hours < 30:
+ # i18n: format X hours and YY minutes as "XhYYm"
+ return _("%dh%02dm") % (hours, minutes)
+ # we're going to ignore minutes in this case
+ hours += 1
+ days = hours // 24
+ hours -= days * 24
+ if days < 15:
+ # i18n: format X days and YY hours as "XdYYh"
+ return _("%dd%02dh") % (days, hours)
+ # we're going to ignore hours in this case
+ days += 1
+ weeks = days // 7
+ days -= weeks * 7
+ if weeks < 55:
+ # i18n: format X weeks and YY days as "XwYYd"
+ return _("%dw%02dd") % (weeks, days)
+ # we're going to ignore days and treat a year as 52 weeks
+ weeks += 1
+ years = weeks // 52
+ weeks -= years * 52
+ # i18n: format X years and YY weeks as "XyYYw"
+ return _("%dy%02dw") % (years, weeks)
+
+class progbar(object):
+ def __init__(self, ui):
+ self.ui = ui
+ self.resetstate()
+
+ def resetstate(self):
+ self.topics = []
+ self.topicstates = {}
+ self.starttimes = {}
+ self.startvals = {}
+ self.printed = False
+ self.lastprint = time.time() + float(self.ui.config(
+ 'progress', 'delay', default=3))
+ self.lasttopic = None
+ self.indetcount = 0
+ self.refresh = float(self.ui.config(
+ 'progress', 'refresh', default=0.1))
+ self.changedelay = max(3 * self.refresh,
+ float(self.ui.config(
+ 'progress', 'changedelay', default=1)))
+ self.order = self.ui.configlist(
+ 'progress', 'format',
+ default=['topic', 'bar', 'number', 'estimate'])
+
+ def show(self, now, topic, pos, item, unit, total):
+ if not shouldprint(self.ui):
+ return
+ termwidth = self.width()
+ self.printed = True
+ head = ''
+ needprogress = False
+ tail = ''
+ for indicator in self.order:
+ add = ''
+ if indicator == 'topic':
+ add = topic
+ elif indicator == 'number':
+ if total:
+ add = ('% ' + str(len(str(total))) +
+ 's/%s') % (pos, total)
+ else:
+ add = str(pos)
+ elif indicator.startswith('item') and item:
+ slice = 'end'
+ if '-' in indicator:
+ wid = int(indicator.split('-')[1])
+ elif '+' in indicator:
+ slice = 'beginning'
+ wid = int(indicator.split('+')[1])
+ else:
+ wid = 20
+ if slice == 'end':
+ add = item[-wid:]
+ else:
+ add = item[:wid]
+ add += (wid - len(add)) * ' '
+ elif indicator == 'bar':
+ add = ''
+ needprogress = True
+ elif indicator == 'unit' and unit:
+ add = unit
+ elif indicator == 'estimate':
+ add = self.estimate(topic, pos, total, now)
+ elif indicator == 'speed':
+ add = self.speed(topic, pos, unit, now)
+ if not needprogress:
+ head = spacejoin(head, add)
+ else:
+ tail = spacejoin(tail, add)
+ if needprogress:
+ used = 0
+ if head:
+ used += len(head) + 1
+ if tail:
+ used += len(tail) + 1
+ progwidth = termwidth - used - 3
+ if total and pos <= total:
+ amt = pos * progwidth // total
+ bar = '=' * (amt - 1)
+ if amt > 0:
+ bar += '>'
+ bar += ' ' * (progwidth - amt)
+ else:
+ progwidth -= 3
+ self.indetcount += 1
+ # mod the count by twice the width so we can make the
+ # cursor bounce between the right and left sides
+ amt = self.indetcount % (2 * progwidth)
+ amt -= progwidth
+ bar = (' ' * int(progwidth - abs(amt)) + '<=>' +
+ ' ' * int(abs(amt)))
+            prog = ''.join(('[', bar, ']'))
+ out = spacejoin(head, prog, tail)
+ else:
+ out = spacejoin(head, tail)
+ sys.stderr.write('\r' + out[:termwidth])
+ self.lasttopic = topic
+ sys.stderr.flush()
+
+ def clear(self):
+ if not shouldprint(self.ui):
+ return
+ sys.stderr.write('\r%s\r' % (' ' * self.width()))
+
+ def complete(self):
+ if not shouldprint(self.ui):
+ return
+ if self.ui.configbool('progress', 'clear-complete', default=True):
+ self.clear()
+ else:
+ sys.stderr.write('\n')
+ sys.stderr.flush()
+
+ def width(self):
+ tw = self.ui.termwidth()
+ return min(int(self.ui.config('progress', 'width', default=tw)), tw)
+
+ def estimate(self, topic, pos, total, now):
+ if total is None:
+ return ''
+ initialpos = self.startvals[topic]
+ target = total - initialpos
+ delta = pos - initialpos
+ if delta > 0:
+ elapsed = now - self.starttimes[topic]
+ if elapsed > float(
+ self.ui.config('progress', 'estimate', default=2)):
+ seconds = (elapsed * (target - delta)) // delta + 1
+ return fmtremaining(seconds)
+ return ''
+
+ def speed(self, topic, pos, unit, now):
+ initialpos = self.startvals[topic]
+ delta = pos - initialpos
+ elapsed = now - self.starttimes[topic]
+ if elapsed > float(
+ self.ui.config('progress', 'estimate', default=2)):
+ return _('%d %s/sec') % (delta / elapsed, unit)
+ return ''
+
+ def progress(self, topic, pos, item='', unit='', total=None):
+ now = time.time()
+ if pos is None:
+ self.starttimes.pop(topic, None)
+ self.startvals.pop(topic, None)
+ self.topicstates.pop(topic, None)
+ # reset the progress bar if this is the outermost topic
+ if self.topics and self.topics[0] == topic and self.printed:
+ self.complete()
+ self.resetstate()
+ # truncate the list of topics assuming all topics within
+ # this one are also closed
+ if topic in self.topics:
+ self.topics = self.topics[:self.topics.index(topic)]
+ else:
+ if topic not in self.topics:
+ self.starttimes[topic] = now
+ self.startvals[topic] = pos
+ self.topics.append(topic)
+ self.topicstates[topic] = pos, item, unit, total
+ if now - self.lastprint >= self.refresh and self.topics:
+ if (self.lasttopic is None # first time we printed
+ # not a topic change
+ or topic == self.lasttopic
+ # it's been long enough we should print anyway
+ or now - self.lastprint >= self.changedelay):
+ self.lastprint = now
+ self.show(now, topic, *self.topicstates[topic])
+
+_singleton = None
+
+def uisetup(ui):
+ global _singleton
+ class progressui(ui.__class__):
+ _progbar = None
+
+ def _quiet(self):
+ return self.debugflag or self.quiet
+
+ def progress(self, *args, **opts):
+ if not self._quiet():
+ self._progbar.progress(*args, **opts)
+ return super(progressui, self).progress(*args, **opts)
+
+ def write(self, *args, **opts):
+ if not self._quiet() and self._progbar.printed:
+ self._progbar.clear()
+ return super(progressui, self).write(*args, **opts)
+
+ def write_err(self, *args, **opts):
+ if not self._quiet() and self._progbar.printed:
+ self._progbar.clear()
+ return super(progressui, self).write_err(*args, **opts)
+
+ # Apps that derive a class from ui.ui() can use
+ # setconfig('progress', 'disable', 'True') to disable this extension
+ if ui.configbool('progress', 'disable'):
+ return
+ if shouldprint(ui) and not ui.debugflag and not ui.quiet:
+ ui.__class__ = progressui
+ # we instantiate one globally shared progress bar to avoid
+ # competing progress bars when multiple UI objects get created
+ if not progressui._progbar:
+ if _singleton is None:
+ _singleton = progbar(ui)
+ progressui._progbar = _singleton
+
+def reposetup(ui, repo):
+ uisetup(repo.ui)
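
For orientation, the consumer side of what this extension instruments:
any command can drive a bar through ui.progress(topic, pos, item, unit,
total) and close the topic by passing pos=None. A minimal sketch of a
hypothetical extension command using that API (the command and its loop
are illustrative only)::

    from mercurial import cmdutil
    from mercurial.i18n import _

    cmdtable = {}
    command = cmdutil.command(cmdtable)

    @command('countfiles', [], _('hg countfiles'))
    def countfiles(ui, repo, **opts):
        '''toy command reporting progress while walking the manifest'''
        files = sorted(repo['tip'].manifest())
        total = len(files)
        for pos, f in enumerate(files):
            # pos advances the bar; item is truncated per the format field
            ui.progress(_('counting'), pos, item=f, unit=_('files'),
                        total=total)
        ui.progress(_('counting'), None)  # pos=None closes the topic
        ui.write('%d files\n' % total)
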
diff --git a/hgext/purge.py b/hgext/purge.py
new file mode 100644
index 0000000..8ec0da4
--- /dev/null
+++ b/hgext/purge.py
@@ -0,0 +1,110 @@
+# Copyright (C) 2006 - Marco Barisione <marco@barisione.org>
+#
+# This is a small extension for Mercurial (http://mercurial.selenic.com/)
+# that removes files not known to mercurial
+#
+# This program was inspired by the "cvspurge" script contained in CVS
+# utilities (http://www.red-bean.com/cvsutils/).
+#
+# For help on the usage of "hg purge" use:
+# hg help purge
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+'''command to delete untracked files from the working directory'''
+
+from mercurial import util, commands, cmdutil, scmutil
+from mercurial.i18n import _
+import os, stat
+
+cmdtable = {}
+command = cmdutil.command(cmdtable)
+testedwith = 'internal'
+
+@command('purge|clean',
+ [('a', 'abort-on-err', None, _('abort if an error occurs')),
+ ('', 'all', None, _('purge ignored files too')),
+ ('p', 'print', None, _('print filenames instead of deleting them')),
+ ('0', 'print0', None, _('end filenames with NUL, for use with xargs'
+ ' (implies -p/--print)')),
+ ] + commands.walkopts,
+ _('hg purge [OPTION]... [DIR]...'))
+def purge(ui, repo, *dirs, **opts):
+ '''removes files not tracked by Mercurial
+
+ Delete files not known to Mercurial. This is useful to test local
+ and uncommitted changes in an otherwise-clean source tree.
+
+ This means that purge will delete:
+
+ - Unknown files: files marked with "?" by :hg:`status`
+ - Empty directories: in fact Mercurial ignores directories unless
+ they contain files under source control management
+
+ But it will leave untouched:
+
+ - Modified and unmodified tracked files
+ - Ignored files (unless --all is specified)
+ - New files added to the repository (with :hg:`add`)
+
+ If directories are given on the command line, only files in these
+ directories are considered.
+
+ Be careful with purge, as you could irreversibly delete some files
+ you forgot to add to the repository. If you only want to print the
+ list of files that this program would delete, use the --print
+ option.
+ '''
+ act = not opts['print']
+ eol = '\n'
+ if opts['print0']:
+ eol = '\0'
+ act = False # --print0 implies --print
+
+ def remove(remove_func, name):
+ if act:
+ try:
+ remove_func(repo.wjoin(name))
+ except OSError:
+ m = _('%s cannot be removed') % name
+ if opts['abort_on_err']:
+ raise util.Abort(m)
+ ui.warn(_('warning: %s\n') % m)
+ else:
+ ui.write('%s%s' % (name, eol))
+
+ def removefile(path):
+ try:
+ os.remove(path)
+ except OSError:
+ # read-only files cannot be unlinked under Windows
+ s = os.stat(path)
+ if (s.st_mode & stat.S_IWRITE) != 0:
+ raise
+ os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
+ os.remove(path)
+
+ directories = []
+ match = scmutil.match(repo[None], dirs, opts)
+ match.dir = directories.append
+ status = repo.status(match=match, ignored=opts['all'], unknown=True)
+
+ for f in sorted(status[4] + status[5]):
+ ui.note(_('removing file %s\n') % f)
+ remove(removefile, f)
+
+ for f in sorted(directories, reverse=True):
+ if match(f) and not os.listdir(repo.wjoin(f)):
+ ui.note(_('removing directory %s\n') % f)
+ remove(os.rmdir, f)
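
One detail of removefile() above worth isolating: on Windows, os.remove()
raises OSError for read-only files, so the mode is widened before
retrying. The same pattern reduced to a standalone helper (the path in
the usage line is hypothetical)::

    import os, stat

    def forceremove(path):
        try:
            os.remove(path)
        except OSError:
            st = os.stat(path)
            if st.st_mode & stat.S_IWRITE:
                raise  # already writable, so it was some other failure
            # clear the read-only bit, then retry once
            os.chmod(path, stat.S_IMODE(st.st_mode) | stat.S_IWRITE)
            os.remove(path)

    forceremove('build/output.tmp')
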
diff --git a/hgext/rebase.py b/hgext/rebase.py
new file mode 100644
index 0000000..f276fcf
--- /dev/null
+++ b/hgext/rebase.py
@@ -0,0 +1,708 @@
+# rebase.py - rebasing feature for mercurial
+#
+# Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''command to move sets of revisions to a different ancestor
+
+This extension lets you rebase changesets in an existing Mercurial
+repository.
+
+For more information:
+http://mercurial.selenic.com/wiki/RebaseExtension
+'''
+
+from mercurial import hg, util, repair, merge, cmdutil, commands, bookmarks
+from mercurial import extensions, patch, scmutil, phases
+from mercurial.commands import templateopts
+from mercurial.node import nullrev
+from mercurial.lock import release
+from mercurial.i18n import _
+import os, errno
+
+nullmerge = -2
+
+cmdtable = {}
+command = cmdutil.command(cmdtable)
+testedwith = 'internal'
+
+@command('rebase',
+ [('s', 'source', '',
+ _('rebase from the specified changeset'), _('REV')),
+ ('b', 'base', '',
+ _('rebase from the base of the specified changeset '
+ '(up to greatest common ancestor of base and dest)'),
+ _('REV')),
+ ('r', 'rev', [],
+ _('rebase these revisions'),
+ _('REV')),
+ ('d', 'dest', '',
+ _('rebase onto the specified changeset'), _('REV')),
+ ('', 'collapse', False, _('collapse the rebased changesets')),
+ ('m', 'message', '',
+ _('use text as collapse commit message'), _('TEXT')),
+ ('e', 'edit', False, _('invoke editor on commit messages')),
+ ('l', 'logfile', '',
+ _('read collapse commit message from file'), _('FILE')),
+ ('', 'keep', False, _('keep original changesets')),
+ ('', 'keepbranches', False, _('keep original branch names')),
+ ('D', 'detach', False, _('(DEPRECATED)')),
+ ('t', 'tool', '', _('specify merge tool')),
+ ('c', 'continue', False, _('continue an interrupted rebase')),
+ ('a', 'abort', False, _('abort an interrupted rebase'))] +
+ templateopts,
+ _('[-s REV | -b REV] [-d REV] [OPTION]'))
+def rebase(ui, repo, **opts):
+ """move changeset (and descendants) to a different branch
+
+ Rebase uses repeated merging to graft changesets from one part of
+ history (the source) onto another (the destination). This can be
+ useful for linearizing *local* changes relative to a master
+ development tree.
+
+ You should not rebase changesets that have already been shared
+ with others. Doing so will force everybody else to perform the
+ same rebase or they will end up with duplicated changesets after
+ pulling in your rebased changesets.
+
+ If you don't specify a destination changeset (``-d/--dest``),
+ rebase uses the tipmost head of the current named branch as the
+ destination. (The destination changeset is not modified by
+ rebasing, but new changesets are added as its descendants.)
+
+ You can specify which changesets to rebase in two ways: as a
+ "source" changeset or as a "base" changeset. Both are shorthand
+ for a topologically related set of changesets (the "source
+ branch"). If you specify source (``-s/--source``), rebase will
+ rebase that changeset and all of its descendants onto dest. If you
+ specify base (``-b/--base``), rebase will select ancestors of base
+ back to but not including the common ancestor with dest. Thus,
+ ``-b`` is less precise but more convenient than ``-s``: you can
+ specify any changeset in the source branch, and rebase will select
+ the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
+ uses the parent of the working directory as the base.
+
+ By default, rebase recreates the changesets in the source branch
+ as descendants of dest and then destroys the originals. Use
+ ``--keep`` to preserve the original source changesets. Some
+ changesets in the source branch (e.g. merges from the destination
+ branch) may be dropped if they no longer contribute any change.
+
+ One result of the rules for selecting the destination changeset
+ and source branch is that, unlike ``merge``, rebase will do
+ nothing if you are at the latest (tipmost) head of a named branch
+ with two heads. You need to explicitly specify source and/or
+ destination (or ``update`` to the other head, if it's the head of
+ the intended source branch).
+
+ If a rebase is interrupted to manually resolve a merge, it can be
+ continued with --continue/-c or aborted with --abort/-a.
+
+ Returns 0 on success, 1 if nothing to rebase.
+ """
+ originalwd = target = None
+ external = nullrev
+ state = {}
+ skipped = set()
+ targetancestors = set()
+
+ editor = None
+ if opts.get('edit'):
+ editor = cmdutil.commitforceeditor
+
+ lock = wlock = None
+ try:
+ wlock = repo.wlock()
+ lock = repo.lock()
+
+ # Validate input and define rebasing points
+ destf = opts.get('dest', None)
+ srcf = opts.get('source', None)
+ basef = opts.get('base', None)
+ revf = opts.get('rev', [])
+ contf = opts.get('continue')
+ abortf = opts.get('abort')
+ collapsef = opts.get('collapse', False)
+ collapsemsg = cmdutil.logmessage(ui, opts)
+ extrafn = opts.get('extrafn') # internal, used by e.g. hgsubversion
+ keepf = opts.get('keep', False)
+ keepbranchesf = opts.get('keepbranches', False)
+ # keepopen is not meant for use on the command line, but by
+ # other extensions
+ keepopen = opts.get('keepopen', False)
+
+ if collapsemsg and not collapsef:
+ raise util.Abort(
+ _('message can only be specified with collapse'))
+
+ if contf or abortf:
+ if contf and abortf:
+ raise util.Abort(_('cannot use both abort and continue'))
+ if collapsef:
+ raise util.Abort(
+ _('cannot use collapse with continue or abort'))
+ if srcf or basef or destf:
+ raise util.Abort(
+ _('abort and continue do not allow specifying revisions'))
+ if opts.get('tool', False):
+ ui.warn(_('tool option will be ignored\n'))
+
+ (originalwd, target, state, skipped, collapsef, keepf,
+ keepbranchesf, external) = restorestatus(repo)
+ if abortf:
+ return abort(repo, originalwd, target, state)
+ else:
+ if srcf and basef:
+ raise util.Abort(_('cannot specify both a '
+ 'source and a base'))
+ if revf and basef:
+ raise util.Abort(_('cannot specify both a '
+ 'revision and a base'))
+ if revf and srcf:
+ raise util.Abort(_('cannot specify both a '
+ 'revision and a source'))
+
+ cmdutil.bailifchanged(repo)
+
+ if not destf:
+ # Destination defaults to the latest revision in the
+ # current branch
+ branch = repo[None].branch()
+ dest = repo[branch]
+ else:
+ dest = scmutil.revsingle(repo, destf)
+
+ if revf:
+ rebaseset = repo.revs('%lr', revf)
+ elif srcf:
+ src = scmutil.revrange(repo, [srcf])
+ rebaseset = repo.revs('(%ld)::', src)
+ else:
+ base = scmutil.revrange(repo, [basef or '.'])
+ rebaseset = repo.revs(
+ '(children(ancestor(%ld, %d)) and ::(%ld))::',
+ base, dest, base)
+
+ if rebaseset:
+ root = min(rebaseset)
+ else:
+ root = None
+
+ if not rebaseset:
+ repo.ui.debug('base is ancestor of destination\n')
+ result = None
+ elif not keepf and list(repo.revs('first(children(%ld) - %ld)',
+ rebaseset, rebaseset)):
+ raise util.Abort(
+ _("can't remove original changesets with"
+ " unrebased descendants"),
+ hint=_('use --keep to keep original changesets'))
+ elif not keepf and not repo[root].mutable():
+ raise util.Abort(_("can't rebase immutable changeset %s")
+ % repo[root],
+ hint=_('see hg help phases for details'))
+ else:
+ result = buildstate(repo, dest, rebaseset, collapsef)
+
+ if not result:
+ # Empty state built, nothing to rebase
+ ui.status(_('nothing to rebase\n'))
+ return 1
+ else:
+ originalwd, target, state = result
+ if collapsef:
+ targetancestors = set(repo.changelog.ancestors([target]))
+ targetancestors.add(target)
+ external = checkexternal(repo, state, targetancestors)
+
+ if keepbranchesf:
+ assert not extrafn, 'cannot use both keepbranches and extrafn'
+ def extrafn(ctx, extra):
+ extra['branch'] = ctx.branch()
+ if collapsef:
+ branches = set()
+ for rev in state:
+ branches.add(repo[rev].branch())
+ if len(branches) > 1:
+ raise util.Abort(_('cannot collapse multiple named '
+ 'branches'))
+
+
+ # Rebase
+ if not targetancestors:
+ targetancestors = set(repo.changelog.ancestors([target]))
+ targetancestors.add(target)
+
+ # Keep track of the current bookmarks in order to reset them later
+ currentbookmarks = repo._bookmarks.copy()
+ activebookmark = repo._bookmarkcurrent
+ if activebookmark:
+ bookmarks.unsetcurrent(repo)
+
+ sortedstate = sorted(state)
+ total = len(sortedstate)
+ pos = 0
+ for rev in sortedstate:
+ pos += 1
+ if state[rev] == -1:
+ ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])),
+ _('changesets'), total)
+ storestatus(repo, originalwd, target, state, collapsef, keepf,
+ keepbranchesf, external)
+ p1, p2 = defineparents(repo, rev, target, state,
+ targetancestors)
+ if len(repo.parents()) == 2:
+ repo.ui.debug('resuming interrupted rebase\n')
+ else:
+ try:
+ ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+ stats = rebasenode(repo, rev, p1, state, collapsef)
+ if stats and stats[3] > 0:
+ raise util.Abort(_('unresolved conflicts (see hg '
+ 'resolve, then hg rebase --continue)'))
+ finally:
+ ui.setconfig('ui', 'forcemerge', '')
+ cmdutil.duplicatecopies(repo, rev, target)
+ if not collapsef:
+ newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn,
+ editor=editor)
+ else:
+ # Skip commit if we are collapsing
+ repo.setparents(repo[p1].node())
+ newrev = None
+ # Update the state
+ if newrev is not None:
+ state[rev] = repo[newrev].rev()
+ else:
+ if not collapsef:
+ ui.note(_('no changes, revision %d skipped\n') % rev)
+ ui.debug('next revision set to %s\n' % p1)
+ skipped.add(rev)
+ state[rev] = p1
+
+ ui.progress(_('rebasing'), None)
+ ui.note(_('rebase merging completed\n'))
+
+ if collapsef and not keepopen:
+ p1, p2 = defineparents(repo, min(state), target,
+ state, targetancestors)
+ if collapsemsg:
+ commitmsg = collapsemsg
+ else:
+ commitmsg = 'Collapsed revision'
+ for rebased in state:
+ if rebased not in skipped and state[rebased] != nullmerge:
+ commitmsg += '\n* %s' % repo[rebased].description()
+ commitmsg = ui.edit(commitmsg, repo.ui.username())
+ newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
+ extrafn=extrafn, editor=editor)
+
+ if 'qtip' in repo.tags():
+ updatemq(repo, state, skipped, **opts)
+
+ if currentbookmarks:
+ # Nodeids are needed to reset bookmarks
+ nstate = {}
+ for k, v in state.iteritems():
+ if v != nullmerge:
+ nstate[repo[k].node()] = repo[v].node()
+
+ if not keepf:
+            # Remove revisions that are no longer useful
+ rebased = [rev for rev in state if state[rev] != nullmerge]
+ if rebased:
+ if set(repo.changelog.descendants([min(rebased)])) - set(state):
+ ui.warn(_("warning: new changesets detected "
+ "on source branch, not stripping\n"))
+ else:
+ # backup the old csets by default
+ repair.strip(ui, repo, repo[min(rebased)].node(), "all")
+
+ if currentbookmarks:
+ updatebookmarks(repo, nstate, currentbookmarks, **opts)
+
+ clearstatus(repo)
+ ui.note(_("rebase completed\n"))
+ if os.path.exists(repo.sjoin('undo')):
+ util.unlinkpath(repo.sjoin('undo'))
+ if skipped:
+ ui.note(_("%d revisions have been skipped\n") % len(skipped))
+
+ if (activebookmark and
+ repo['tip'].node() == repo._bookmarks[activebookmark]):
+ bookmarks.setcurrent(repo, activebookmark)
+
+ finally:
+ release(lock, wlock)
+
+def checkexternal(repo, state, targetancestors):
+ """Check whether one or more external revisions need to be taken in
+ consideration. In the latter case, abort.
+ """
+ external = nullrev
+ source = min(state)
+ for rev in state:
+ if rev == source:
+ continue
+ # Check externals and fail if there are more than one
+ for p in repo[rev].parents():
+ if (p.rev() not in state
+ and p.rev() not in targetancestors):
+ if external != nullrev:
+ raise util.Abort(_('unable to collapse, there is more '
+ 'than one external parent'))
+ external = p.rev()
+ return external
+
+def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None):
+ 'Commit the changes and store useful information in extra'
+ try:
+ repo.setparents(repo[p1].node(), repo[p2].node())
+ ctx = repo[rev]
+ if commitmsg is None:
+ commitmsg = ctx.description()
+ extra = {'rebase_source': ctx.hex()}
+ if extrafn:
+ extrafn(ctx, extra)
+ # Commit might fail if unresolved files exist
+ newrev = repo.commit(text=commitmsg, user=ctx.user(),
+ date=ctx.date(), extra=extra, editor=editor)
+ repo.dirstate.setbranch(repo[newrev].branch())
+ targetphase = max(ctx.phase(), phases.draft)
+ # retractboundary doesn't overwrite upper phase inherited from parent
+ newnode = repo[newrev].node()
+ if newnode:
+ phases.retractboundary(repo, targetphase, [newnode])
+ return newrev
+ except util.Abort:
+ # Invalidate the previous setparents
+ repo.dirstate.invalidate()
+ raise
+
+def rebasenode(repo, rev, p1, state, collapse):
+ 'Rebase a single revision'
+ # Merge phase
+ # Update to target and merge it with local
+ if repo['.'].rev() != repo[p1].rev():
+ repo.ui.debug(" update to %d:%s\n" % (repo[p1].rev(), repo[p1]))
+ merge.update(repo, p1, False, True, False)
+ else:
+ repo.ui.debug(" already in target\n")
+ repo.dirstate.write()
+ repo.ui.debug(" merge against %d:%s\n" % (repo[rev].rev(), repo[rev]))
+ base = None
+ if repo[rev].rev() != repo[min(state)].rev():
+ base = repo[rev].p1().node()
+ # When collapsing in-place, the parent is the common ancestor, we
+ # have to allow merging with it.
+ return merge.update(repo, rev, True, True, False, base, collapse)
+
+def defineparents(repo, rev, target, state, targetancestors):
+ 'Return the new parent relationship of the revision that will be rebased'
+ parents = repo[rev].parents()
+ p1 = p2 = nullrev
+
+ P1n = parents[0].rev()
+ if P1n in targetancestors:
+ p1 = target
+ elif P1n in state:
+ if state[P1n] == nullmerge:
+ p1 = target
+ else:
+ p1 = state[P1n]
+ else: # P1n external
+ p1 = target
+ p2 = P1n
+
+ if len(parents) == 2 and parents[1].rev() not in targetancestors:
+ P2n = parents[1].rev()
+ # interesting second parent
+ if P2n in state:
+ if p1 == target: # P1n in targetancestors or external
+ p1 = state[P2n]
+ else:
+ p2 = state[P2n]
+ else: # P2n external
+ if p2 != nullrev: # P1n external too => rev is a merged revision
+ raise util.Abort(_('cannot use revision %d as base, result '
+ 'would have 3 parents') % rev)
+ p2 = P2n
+ repo.ui.debug(" future parents are %d and %d\n" %
+ (repo[p1].rev(), repo[p2].rev()))
+ return p1, p2
+
+def isagitpatch(repo, patchname):
+ 'Return true if the given patch is in git format'
+ mqpatch = os.path.join(repo.mq.path, patchname)
+ for line in patch.linereader(file(mqpatch, 'rb')):
+ if line.startswith('diff --git'):
+ return True
+ return False
+
+def updatemq(repo, state, skipped, **opts):
+ 'Update rebased mq patches - finalize and then import them'
+ mqrebase = {}
+ mq = repo.mq
+ original_series = mq.fullseries[:]
+ skippedpatches = set()
+
+ for p in mq.applied:
+ rev = repo[p.node].rev()
+ if rev in state:
+ repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' %
+ (rev, p.name))
+ mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
+ else:
+ # Applied but not rebased, not sure this should happen
+ skippedpatches.add(p.name)
+
+ if mqrebase:
+ mq.finish(repo, mqrebase.keys())
+
+ # We must start import from the newest revision
+ for rev in sorted(mqrebase, reverse=True):
+ if rev not in skipped:
+ name, isgit = mqrebase[rev]
+ repo.ui.debug('import mq patch %d (%s)\n' % (state[rev], name))
+ mq.qimport(repo, (), patchname=name, git=isgit,
+ rev=[str(state[rev])])
+ else:
+ # Rebased and skipped
+ skippedpatches.add(mqrebase[rev][0])
+
+ # Patches were either applied and rebased and imported in
+ # order, applied and removed or unapplied. Discard the removed
+ # ones while preserving the original series order and guards.
+ newseries = [s for s in original_series
+ if mq.guard_re.split(s, 1)[0] not in skippedpatches]
+ mq.fullseries[:] = newseries
+ mq.seriesdirty = True
+ mq.savedirty()
+
+def updatebookmarks(repo, nstate, originalbookmarks, **opts):
+ 'Move bookmarks to their correct changesets'
+ for k, v in originalbookmarks.iteritems():
+ if v in nstate:
+ if nstate[v] != nullmerge:
+ # update the bookmarks for revs that have moved
+ repo._bookmarks[k] = nstate[v]
+
+ bookmarks.write(repo)
+
+def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches,
+ external):
+ 'Store the current status to allow recovery'
+ f = repo.opener("rebasestate", "w")
+ f.write(repo[originalwd].hex() + '\n')
+ f.write(repo[target].hex() + '\n')
+ f.write(repo[external].hex() + '\n')
+ f.write('%d\n' % int(collapse))
+ f.write('%d\n' % int(keep))
+ f.write('%d\n' % int(keepbranches))
+ for d, v in state.iteritems():
+ oldrev = repo[d].hex()
+ if v != nullmerge:
+ newrev = repo[v].hex()
+ else:
+ newrev = v
+ f.write("%s:%s\n" % (oldrev, newrev))
+ f.close()
+ repo.ui.debug('rebase status stored\n')
+
+def clearstatus(repo):
+ 'Remove the status files'
+ if os.path.exists(repo.join("rebasestate")):
+ util.unlinkpath(repo.join("rebasestate"))
+
+def restorestatus(repo):
+ 'Restore a previously stored status'
+ try:
+ target = None
+ collapse = False
+ external = nullrev
+ state = {}
+ f = repo.opener("rebasestate")
+ for i, l in enumerate(f.read().splitlines()):
+ if i == 0:
+ originalwd = repo[l].rev()
+ elif i == 1:
+ target = repo[l].rev()
+ elif i == 2:
+ external = repo[l].rev()
+ elif i == 3:
+ collapse = bool(int(l))
+ elif i == 4:
+ keep = bool(int(l))
+ elif i == 5:
+ keepbranches = bool(int(l))
+ else:
+ oldrev, newrev = l.split(':')
+ if newrev != str(nullmerge):
+ state[repo[oldrev].rev()] = repo[newrev].rev()
+ else:
+ state[repo[oldrev].rev()] = int(newrev)
+ skipped = set()
+ # recompute the set of skipped revs
+ if not collapse:
+ seen = set([target])
+ for old, new in sorted(state.items()):
+ if new != nullrev and new in seen:
+ skipped.add(old)
+ seen.add(new)
+ repo.ui.debug('computed skipped revs: %s\n' % skipped)
+ repo.ui.debug('rebase status resumed\n')
+ return (originalwd, target, state, skipped,
+ collapse, keep, keepbranches, external)
+ except IOError, err:
+ if err.errno != errno.ENOENT:
+ raise
+ raise util.Abort(_('no rebase in progress'))
+
+def abort(repo, originalwd, target, state):
+ 'Restore the repository to its original state'
+ dstates = [s for s in state.values() if s != nullrev]
+ immutable = [d for d in dstates if not repo[d].mutable()]
+ if immutable:
+ raise util.Abort(_("can't abort rebase due to immutable changesets %s")
+ % ', '.join(str(repo[r]) for r in immutable),
+ hint=_('see hg help phases for details'))
+
+ descendants = set()
+ if dstates:
+ descendants = set(repo.changelog.descendants(dstates))
+ if descendants - set(dstates):
+ repo.ui.warn(_("warning: new changesets detected on target branch, "
+ "can't abort\n"))
+ return -1
+ else:
+ # Strip from the first rebased revision
+ merge.update(repo, repo[originalwd].rev(), False, True, False)
+ rebased = filter(lambda x: x > -1 and x != target, state.values())
+ if rebased:
+ strippoint = min(rebased)
+ # no backup of rebased cset versions needed
+ repair.strip(repo.ui, repo, repo[strippoint].node())
+ clearstatus(repo)
+ repo.ui.warn(_('rebase aborted\n'))
+ return 0
+
+def buildstate(repo, dest, rebaseset, collapse):
+ '''Define which revisions are going to be rebased and where
+
+ repo: repo
+ dest: context
+ rebaseset: set of rev
+ '''
+
+ # This check isn't strictly necessary, since mq detects commits over an
+ # applied patch. But it prevents messing up the working directory when
+ # a partially completed rebase is blocked by mq.
+ if 'qtip' in repo.tags() and (dest.node() in
+ [s.node for s in repo.mq.applied]):
+ raise util.Abort(_('cannot rebase onto an applied mq patch'))
+
+ roots = list(repo.set('roots(%ld)', rebaseset))
+ if not roots:
+ raise util.Abort(_('no matching revisions'))
+ if len(roots) > 1:
+ raise util.Abort(_("can't rebase multiple roots"))
+ root = roots[0]
+
+ commonbase = root.ancestor(dest)
+ if commonbase == root:
+ raise util.Abort(_('source is ancestor of destination'))
+ if commonbase == dest:
+ samebranch = root.branch() == dest.branch()
+ if not collapse and samebranch and root in dest.children():
+ repo.ui.debug('source is a child of destination\n')
+ return None
+
+ repo.ui.debug('rebase onto %d starting from %d\n' % (dest, root))
+ state = dict.fromkeys(rebaseset, nullrev)
+ # Rebase tries to turn <dest> into a parent of <root> while
+ # preserving the number of parents of rebased changesets:
+ #
+ # - A changeset with a single parent will always be rebased as a
+ # changeset with a single parent.
+ #
+ # - A merge will be rebased as merge unless its parents are both
+ # ancestors of <dest> or are themselves in the rebased set and
+ # pruned while rebased.
+ #
+ # If one parent of <root> is an ancestor of <dest>, the rebased
+ # version of this parent will be <dest>. This is always true with
+ # --base option.
+ #
+ # Otherwise, we need to *replace* the original parents with
+ # <dest>. This "detaches" the rebased set from its former location
+ # and rebases it onto <dest>. Changes introduced by ancestors of
+ # <root> not common with <dest> (the detachset, marked as
+ # nullmerge) are "removed" from the rebased changesets.
+ #
+ # - If <root> has a single parent, set it to <dest>.
+ #
+ # - If <root> is a merge, we cannot decide which parent to
+ # replace, the rebase operation is not clearly defined.
+ #
+ # The table below sums up this behavior:
+ #
+ # +--------------------+----------------------+-------------------------+
+ # | | one parent | merge |
+ # +--------------------+----------------------+-------------------------+
+ # | parent in ::<dest> | new parent is <dest> | parents in ::<dest> are |
+ # | | | remapped to <dest> |
+ # +--------------------+----------------------+-------------------------+
+ # | unrelated source | new parent is <dest> | ambiguous, abort |
+ # +--------------------+----------------------+-------------------------+
+ #
+ # The actual abort is handled by `defineparents`
+ if len(root.parents()) <= 1:
+ # (strict) ancestors of <root> not ancestors of <dest>
+ detachset = repo.revs('::%d - ::%d - %d', root, commonbase, root)
+ state.update(dict.fromkeys(detachset, nullmerge))
+ return repo['.'].rev(), dest.rev(), state
+
+def pullrebase(orig, ui, repo, *args, **opts):
+ 'Call rebase after pull if the latter has been invoked with --rebase'
+ if opts.get('rebase'):
+ if opts.get('update'):
+ del opts['update']
+ ui.debug('--update and --rebase are not compatible, ignoring '
+ 'the update flag\n')
+
+ movemarkfrom = repo['.'].node()
+ cmdutil.bailifchanged(repo)
+ revsprepull = len(repo)
+ origpostincoming = commands.postincoming
+ def _dummy(*args, **kwargs):
+ pass
+ commands.postincoming = _dummy
+ try:
+ orig(ui, repo, *args, **opts)
+ finally:
+ commands.postincoming = origpostincoming
+ revspostpull = len(repo)
+ if revspostpull > revsprepull:
+ rebase(ui, repo, **opts)
+ branch = repo[None].branch()
+ dest = repo[branch].rev()
+ if dest != repo['.'].rev():
+                # there was nothing to rebase, so we force an update
+ hg.update(repo, dest)
+ if bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
+ ui.status(_("updating bookmark %s\n")
+ % repo._bookmarkcurrent)
+ else:
+ if opts.get('tool'):
+ raise util.Abort(_('--tool can only be used with --rebase'))
+ orig(ui, repo, *args, **opts)
+
+def uisetup(ui):
+ 'Replace pull with a decorator to provide --rebase option'
+ entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
+ entry[1].append(('', 'rebase', None,
+ _("rebase working directory to branch head")))
+ entry[1].append(('t', 'tool', '',
+ _("specify merge tool for rebase")))
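
storestatus() and restorestatus() above define a simple line-oriented
format for .hg/rebasestate: three hex nodes (original working directory,
target, external parent), three integer flags (collapse, keep,
keepbranches), then one oldnode:newnode pair per tracked revision, where
newnode is the literal string '-2' (nullmerge) for pruned changesets. A
sketch of a standalone reader that returns raw hex strings instead of
resolving them against a repository::

    def readrebasestate(path):
        # minimal reader for the format written by storestatus() above
        f = open(path)
        try:
            lines = f.read().splitlines()
        finally:
            f.close()
        originalwd, target, external = lines[0], lines[1], lines[2]
        collapse, keep, keepbranches = [bool(int(l)) for l in lines[3:6]]
        state = {}
        for l in lines[6:]:
            oldnode, newnode = l.split(':')
            # newnode is '-2' for changesets pruned from the rebase set
            state[oldnode] = newnode
        return (originalwd, target, external,
                collapse, keep, keepbranches, state)
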
diff --git a/hgext/record.py b/hgext/record.py
new file mode 100644
index 0000000..ec9e384
--- /dev/null
+++ b/hgext/record.py
@@ -0,0 +1,666 @@
+# record.py
+#
+# Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''commands to interactively select changes for commit/qrefresh'''
+
+from mercurial.i18n import gettext, _
+from mercurial import cmdutil, commands, extensions, hg, mdiff, patch
+from mercurial import util
+import copy, cStringIO, errno, os, re, shutil, tempfile
+
+cmdtable = {}
+command = cmdutil.command(cmdtable)
+testedwith = 'internal'
+
+lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
+
+diffopts = [
+ ('w', 'ignore-all-space', False,
+ _('ignore white space when comparing lines')),
+ ('b', 'ignore-space-change', None,
+ _('ignore changes in the amount of white space')),
+ ('B', 'ignore-blank-lines', None,
+ _('ignore changes whose lines are all blank')),
+]
+
+def scanpatch(fp):
+ """like patch.iterhunks, but yield different events
+
+ - ('file', [header_lines + fromfile + tofile])
+ - ('context', [context_lines])
+ - ('hunk', [hunk_lines])
+ - ('range', (-start,len, +start,len, diffp))
+ """
+ lr = patch.linereader(fp)
+
+ def scanwhile(first, p):
+ """scan lr while predicate holds"""
+ lines = [first]
+ while True:
+ line = lr.readline()
+ if not line:
+ break
+ if p(line):
+ lines.append(line)
+ else:
+ lr.push(line)
+ break
+ return lines
+
+ while True:
+ line = lr.readline()
+ if not line:
+ break
+ if line.startswith('diff --git a/') or line.startswith('diff -r '):
+ def notheader(line):
+ s = line.split(None, 1)
+ return not s or s[0] not in ('---', 'diff')
+ header = scanwhile(line, notheader)
+ fromfile = lr.readline()
+ if fromfile.startswith('---'):
+ tofile = lr.readline()
+ header += [fromfile, tofile]
+ else:
+ lr.push(fromfile)
+ yield 'file', header
+ elif line[0] == ' ':
+ yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
+ elif line[0] in '-+':
+ yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
+ else:
+ m = lines_re.match(line)
+ if m:
+ yield 'range', m.groups()
+ else:
+ raise patch.PatchError('unknown patch content: %r' % line)
+
+class header(object):
+ """patch header
+
+    XXX shouldn't we move this to mercurial/patch.py ?
+ """
+ diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
+ diff_re = re.compile('diff -r .* (.*)$')
+ allhunks_re = re.compile('(?:index|new file|deleted file) ')
+ pretty_re = re.compile('(?:new file|deleted file) ')
+ special_re = re.compile('(?:index|new|deleted|copy|rename) ')
+
+ def __init__(self, header):
+ self.header = header
+ self.hunks = []
+
+ def binary(self):
+ return util.any(h.startswith('index ') for h in self.header)
+
+ def pretty(self, fp):
+ for h in self.header:
+ if h.startswith('index '):
+ fp.write(_('this modifies a binary file (all or nothing)\n'))
+ break
+ if self.pretty_re.match(h):
+ fp.write(h)
+ if self.binary():
+ fp.write(_('this is a binary file\n'))
+ break
+ if h.startswith('---'):
+ fp.write(_('%d hunks, %d lines changed\n') %
+ (len(self.hunks),
+ sum([max(h.added, h.removed) for h in self.hunks])))
+ break
+ fp.write(h)
+
+ def write(self, fp):
+ fp.write(''.join(self.header))
+
+ def allhunks(self):
+ return util.any(self.allhunks_re.match(h) for h in self.header)
+
+ def files(self):
+ match = self.diffgit_re.match(self.header[0])
+ if match:
+ fromfile, tofile = match.groups()
+ if fromfile == tofile:
+ return [fromfile]
+ return [fromfile, tofile]
+ else:
+ return self.diff_re.match(self.header[0]).groups()
+
+ def filename(self):
+ return self.files()[-1]
+
+ def __repr__(self):
+ return '<header %s>' % (' '.join(map(repr, self.files())))
+
+ def special(self):
+ return util.any(self.special_re.match(h) for h in self.header)
+
+def countchanges(hunk):
+ """hunk -> (n+,n-)"""
+ add = len([h for h in hunk if h[0] == '+'])
+ rem = len([h for h in hunk if h[0] == '-'])
+ return add, rem
+
+class hunk(object):
+ """patch hunk
+
+    XXX shouldn't we merge this with patch.hunk?
+ """
+ maxcontext = 3
+
+ def __init__(self, header, fromline, toline, proc, before, hunk, after):
+ def trimcontext(number, lines):
+ delta = len(lines) - self.maxcontext
+ if False and delta > 0:
+ return number + delta, lines[:self.maxcontext]
+ return number, lines
+
+ self.header = header
+ self.fromline, self.before = trimcontext(fromline, before)
+ self.toline, self.after = trimcontext(toline, after)
+ self.proc = proc
+ self.hunk = hunk
+ self.added, self.removed = countchanges(self.hunk)
+
+ def write(self, fp):
+ delta = len(self.before) + len(self.after)
+ if self.after and self.after[-1] == '\\ No newline at end of file\n':
+ delta -= 1
+ fromlen = delta + self.removed
+ tolen = delta + self.added
+ fp.write('@@ -%d,%d +%d,%d @@%s\n' %
+ (self.fromline, fromlen, self.toline, tolen,
+ self.proc and (' ' + self.proc)))
+ fp.write(''.join(self.before + self.hunk + self.after))
+
+ pretty = write
+
+ def filename(self):
+ return self.header.filename()
+
+ def __repr__(self):
+ return '<hunk %r@%d>' % (self.filename(), self.fromline)
+
+def parsepatch(fp):
+ """patch -> [] of headers -> [] of hunks """
+ class parser(object):
+ """patch parsing state machine"""
+ def __init__(self):
+ self.fromline = 0
+ self.toline = 0
+ self.proc = ''
+ self.header = None
+ self.context = []
+ self.before = []
+ self.hunk = []
+ self.headers = []
+
+ def addrange(self, limits):
+ fromstart, fromend, tostart, toend, proc = limits
+ self.fromline = int(fromstart)
+ self.toline = int(tostart)
+ self.proc = proc
+
+ def addcontext(self, context):
+ if self.hunk:
+ h = hunk(self.header, self.fromline, self.toline, self.proc,
+ self.before, self.hunk, context)
+ self.header.hunks.append(h)
+ self.fromline += len(self.before) + h.removed
+ self.toline += len(self.before) + h.added
+ self.before = []
+ self.hunk = []
+ self.proc = ''
+ self.context = context
+
+ def addhunk(self, hunk):
+ if self.context:
+ self.before = self.context
+ self.context = []
+ self.hunk = hunk
+
+ def newfile(self, hdr):
+ self.addcontext([])
+ h = header(hdr)
+ self.headers.append(h)
+ self.header = h
+
+ def finished(self):
+ self.addcontext([])
+ return self.headers
+
+ transitions = {
+ 'file': {'context': addcontext,
+ 'file': newfile,
+ 'hunk': addhunk,
+ 'range': addrange},
+ 'context': {'file': newfile,
+ 'hunk': addhunk,
+ 'range': addrange},
+ 'hunk': {'context': addcontext,
+ 'file': newfile,
+ 'range': addrange},
+ 'range': {'context': addcontext,
+ 'hunk': addhunk},
+ }
+
+ p = parser()
+
+ state = 'context'
+ for newstate, data in scanpatch(fp):
+ try:
+ p.transitions[state][newstate](p, data)
+ except KeyError:
+ raise patch.PatchError('unhandled transition: %s -> %s' %
+ (state, newstate))
+ state = newstate
+ return p.finished()
+
+def filterpatch(ui, headers):
+ """Interactively filter patch chunks into applied-only chunks"""
+
+ def prompt(skipfile, skipall, query, chunk):
+ """prompt query, and process base inputs
+
+ - y/n for the rest of file
+ - y/n for the rest
+ - ? (help)
+ - q (quit)
+
+ Return True/False and possibly updated skipfile and skipall.
+ """
+ newpatches = None
+ if skipall is not None:
+ return skipall, skipfile, skipall, newpatches
+ if skipfile is not None:
+ return skipfile, skipfile, skipall, newpatches
+ while True:
+ resps = _('[Ynesfdaq?]')
+ choices = (_('&Yes, record this change'),
+ _('&No, skip this change'),
+ _('&Edit the change manually'),
+ _('&Skip remaining changes to this file'),
+ _('Record remaining changes to this &file'),
+ _('&Done, skip remaining changes and files'),
+ _('Record &all changes to all remaining files'),
+ _('&Quit, recording no changes'),
+ _('&?'))
+ r = ui.promptchoice("%s %s" % (query, resps), choices)
+ ui.write("\n")
+ if r == 8: # ?
+ doc = gettext(record.__doc__)
+ c = doc.find('::') + 2
+ for l in doc[c:].splitlines():
+ if l.startswith(' '):
+ ui.write(l.strip(), '\n')
+ continue
+ elif r == 0: # yes
+ ret = True
+ elif r == 1: # no
+ ret = False
+ elif r == 2: # Edit patch
+ if chunk is None:
+ ui.write(_('cannot edit patch for whole file'))
+ ui.write("\n")
+ continue
+ if chunk.header.binary():
+ ui.write(_('cannot edit patch for binary file'))
+ ui.write("\n")
+ continue
+ # Patch comment based on the Git one (based on comment at end of
+ # http://mercurial.selenic.com/wiki/RecordExtension)
+ phelp = '---' + _("""
+To remove '-' lines, make them ' ' lines (context).
+To remove '+' lines, delete them.
+Lines starting with # will be removed from the patch.
+
+If the patch applies cleanly, the edited hunk will immediately be
+added to the record list. If it does not apply cleanly, a rejects
+file will be generated: you can use that when you try again. If
+all lines of the hunk are removed, then the edit is aborted and
+the hunk is left unchanged.
+""")
+ (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
+ suffix=".diff", text=True)
+ ncpatchfp = None
+ try:
+ # Write the initial patch
+ f = os.fdopen(patchfd, "w")
+ chunk.header.write(f)
+ chunk.write(f)
+ f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
+ f.close()
+ # Start the editor and wait for it to complete
+ editor = ui.geteditor()
+ util.system("%s \"%s\"" % (editor, patchfn),
+ environ={'HGUSER': ui.username()},
+ onerr=util.Abort, errprefix=_("edit failed"),
+ out=ui.fout)
+ # Remove comment lines
+ patchfp = open(patchfn)
+ ncpatchfp = cStringIO.StringIO()
+ for line in patchfp:
+ if not line.startswith('#'):
+ ncpatchfp.write(line)
+ patchfp.close()
+ ncpatchfp.seek(0)
+ newpatches = parsepatch(ncpatchfp)
+ finally:
+ os.unlink(patchfn)
+ del ncpatchfp
+ # Signal that the chunk shouldn't be applied as-is, but
+ # provide the new patch to be used instead.
+ ret = False
+ elif r == 3: # Skip
+ ret = skipfile = False
+ elif r == 4: # file (Record remaining)
+ ret = skipfile = True
+ elif r == 5: # done, skip remaining
+ ret = skipall = False
+ elif r == 6: # all
+ ret = skipall = True
+ elif r == 7: # quit
+ raise util.Abort(_('user quit'))
+ return ret, skipfile, skipall, newpatches
+
+ seen = set()
+ applied = {} # 'filename' -> [] of chunks
+ skipfile, skipall = None, None
+ pos, total = 1, sum(len(h.hunks) for h in headers)
+ for h in headers:
+ pos += len(h.hunks)
+ skipfile = None
+ fixoffset = 0
+ hdr = ''.join(h.header)
+ if hdr in seen:
+ continue
+ seen.add(hdr)
+ if skipall is None:
+ h.pretty(ui)
+ msg = (_('examine changes to %s?') %
+ _(' and ').join("'%s'" % f for f in h.files()))
+ r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
+ if not r:
+ continue
+ applied[h.filename()] = [h]
+ if h.allhunks():
+ applied[h.filename()] += h.hunks
+ continue
+ for i, chunk in enumerate(h.hunks):
+ if skipfile is None and skipall is None:
+ chunk.pretty(ui)
+ if total == 1:
+ msg = _('record this change to %r?') % chunk.filename()
+ else:
+ idx = pos - len(h.hunks) + i
+ msg = _('record change %d/%d to %r?') % (idx, total,
+ chunk.filename())
+ r, skipfile, skipall, newpatches = prompt(skipfile,
+ skipall, msg, chunk)
+ if r:
+ if fixoffset:
+ chunk = copy.copy(chunk)
+ chunk.toline += fixoffset
+ applied[chunk.filename()].append(chunk)
+ elif newpatches is not None:
+ for newpatch in newpatches:
+ for newhunk in newpatch.hunks:
+ if fixoffset:
+ newhunk.toline += fixoffset
+ applied[newhunk.filename()].append(newhunk)
+ else:
+ fixoffset += chunk.removed - chunk.added
+ return sum([h for h in applied.itervalues()
+ if h[0].special() or len(h) > 1], [])
+
+@command("record",
+ # same options as commit + white space diff options
+ commands.table['^commit|ci'][1][:] + diffopts,
+ _('hg record [OPTION]... [FILE]...'))
+def record(ui, repo, *pats, **opts):
+ '''interactively select changes to commit
+
+ If a list of files is omitted, all changes reported by :hg:`status`
+ will be candidates for recording.
+
+ See :hg:`help dates` for a list of formats valid for -d/--date.
+
+ You will be prompted for whether to record changes to each
+ modified file, and for files with multiple changes, for each
+ change to use. For each query, the following responses are
+ possible::
+
+ y - record this change
+ n - skip this change
+ e - edit this change manually
+
+ s - skip remaining changes to this file
+ f - record remaining changes to this file
+
+ d - done, skip remaining changes and files
+ a - record all changes to all remaining files
+ q - quit, recording no changes
+
+ ? - display help
+
+ This command is not available when committing a merge.'''
+
+ dorecord(ui, repo, commands.commit, 'commit', False, *pats, **opts)
+
+def qrefresh(origfn, ui, repo, *pats, **opts):
+ if not opts['interactive']:
+ return origfn(ui, repo, *pats, **opts)
+
+ mq = extensions.find('mq')
+
+ def committomq(ui, repo, *pats, **opts):
+ # At this point the working copy contains only changes that
+ # were accepted. All other changes were reverted.
+ # We can't pass *pats here since qrefresh will undo all other
+ # changed files in the patch that aren't in pats.
+ mq.refresh(ui, repo, **opts)
+
+ # backup all changed files
+ dorecord(ui, repo, committomq, 'qrefresh', True, *pats, **opts)
+
+def qrecord(ui, repo, patch, *pats, **opts):
+ '''interactively record a new patch
+
+ See :hg:`help qnew` & :hg:`help record` for more information and
+ usage.
+ '''
+
+ try:
+ mq = extensions.find('mq')
+ except KeyError:
+ raise util.Abort(_("'mq' extension not loaded"))
+
+ repo.mq.checkpatchname(patch)
+
+ def committomq(ui, repo, *pats, **opts):
+ opts['checkname'] = False
+ mq.new(ui, repo, patch, *pats, **opts)
+
+ dorecord(ui, repo, committomq, 'qnew', False, *pats, **opts)
+
+def qnew(origfn, ui, repo, patch, *args, **opts):
+ if opts['interactive']:
+ return qrecord(ui, repo, patch, *args, **opts)
+ return origfn(ui, repo, patch, *args, **opts)
+
+def dorecord(ui, repo, commitfunc, cmdsuggest, backupall, *pats, **opts):
+ if not ui.interactive():
+ raise util.Abort(_('running non-interactively, use %s instead') %
+ cmdsuggest)
+
+ def recordfunc(ui, repo, message, match, opts):
+ """This is generic record driver.
+
+ Its job is to interactively filter local changes, and
+ accordingly prepare working directory into a state in which the
+ job can be delegated to a non-interactive commit command such as
+ 'commit' or 'qrefresh'.
+
+ After the actual job is done by non-interactive command, the
+ working directory is restored to its original state.
+
+ In the end we'll record interesting changes, and everything else
+ will be left in place, so the user can continue working.
+ """
+
+ merge = len(repo[None].parents()) > 1
+ if merge:
+ raise util.Abort(_('cannot partially commit a merge '
+ '(use "hg commit" instead)'))
+
+ changes = repo.status(match=match)[:3]
+ diffopts = mdiff.diffopts(
+ git=True, nodates=True,
+ ignorews=opts.get('ignore_all_space'),
+ ignorewsamount=opts.get('ignore_space_change'),
+ ignoreblanklines=opts.get('ignore_blank_lines'))
+ chunks = patch.diff(repo, changes=changes, opts=diffopts)
+ fp = cStringIO.StringIO()
+ fp.write(''.join(chunks))
+ fp.seek(0)
+
+        # 1. filter the patch, so we have the intending-to-apply subset of it
+ chunks = filterpatch(ui, parsepatch(fp))
+ del fp
+
+ contenders = set()
+ for h in chunks:
+ try:
+ contenders.update(set(h.files()))
+ except AttributeError:
+ pass
+
+ changed = changes[0] + changes[1] + changes[2]
+ newfiles = [f for f in changed if f in contenders]
+ if not newfiles:
+ ui.status(_('no changes to record\n'))
+ return 0
+
+ modified = set(changes[0])
+
+ # 2. backup changed files, so we can restore them in the end
+ if backupall:
+ tobackup = changed
+ else:
+ tobackup = [f for f in newfiles if f in modified]
+
+ backups = {}
+ if tobackup:
+ backupdir = repo.join('record-backups')
+ try:
+ os.mkdir(backupdir)
+ except OSError, err:
+ if err.errno != errno.EEXIST:
+ raise
+ try:
+ # backup continues
+ for f in tobackup:
+ fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
+ dir=backupdir)
+ os.close(fd)
+ ui.debug('backup %r as %r\n' % (f, tmpname))
+ util.copyfile(repo.wjoin(f), tmpname)
+ shutil.copystat(repo.wjoin(f), tmpname)
+ backups[f] = tmpname
+
+ fp = cStringIO.StringIO()
+ for c in chunks:
+ if c.filename() in backups:
+ c.write(fp)
+ dopatch = fp.tell()
+ fp.seek(0)
+
+ # 3a. apply filtered patch to clean repo (clean)
+ if backups:
+ hg.revert(repo, repo.dirstate.p1(),
+ lambda key: key in backups)
+
+ # 3b. (apply)
+ if dopatch:
+ try:
+ ui.debug('applying patch\n')
+ ui.debug(fp.getvalue())
+ patch.internalpatch(ui, repo, fp, 1, eolmode=None)
+ except patch.PatchError, err:
+ raise util.Abort(str(err))
+ del fp
+
+ # 4. We prepared working directory according to filtered
+ # patch. Now is the time to delegate the job to
+ # commit/qrefresh or the like!
+
+ # it is important to first chdir to repo root -- we'll call
+ # a highlevel command with list of pathnames relative to
+ # repo root
+ cwd = os.getcwd()
+ os.chdir(repo.root)
+ try:
+ commitfunc(ui, repo, *newfiles, **opts)
+ finally:
+ os.chdir(cwd)
+
+ return 0
+ finally:
+ # 5. finally restore backed-up files
+ try:
+ for realname, tmpname in backups.iteritems():
+ ui.debug('restoring %r to %r\n' % (tmpname, realname))
+ util.copyfile(tmpname, repo.wjoin(realname))
+                    # Our calls to copystat() here and above are a
+                    # hack to trick any editors that have f open into
+                    # thinking that we haven't modified it.
+                    #
+                    # Also note that this is racy, as an editor could
+                    # notice the file's mtime before we've finished
+                    # writing it.
+ shutil.copystat(tmpname, repo.wjoin(realname))
+ os.unlink(tmpname)
+ if tobackup:
+ os.rmdir(backupdir)
+ except OSError:
+ pass
+
+ # wrap ui.write so diff output can be labeled/colorized
+ def wrapwrite(orig, *args, **kw):
+ label = kw.pop('label', '')
+ for chunk, l in patch.difflabel(lambda: args):
+ orig(chunk, label=label + l)
+ oldwrite = ui.write
+ extensions.wrapfunction(ui, 'write', wrapwrite)
+ try:
+ return cmdutil.commit(ui, repo, recordfunc, pats, opts)
+ finally:
+ ui.write = oldwrite
+
+cmdtable["qrecord"] = \
+ (qrecord, [], # placeholder until mq is available
+ _('hg qrecord [OPTION]... PATCH [FILE]...'))
+
+def uisetup(ui):
+ try:
+ mq = extensions.find('mq')
+ except KeyError:
+ return
+
+ cmdtable["qrecord"] = \
+ (qrecord,
+ # same options as qnew, but copy them so we don't get
+ # -i/--interactive for qrecord and add white space diff options
+ mq.cmdtable['^qnew'][1][:] + diffopts,
+ _('hg qrecord [OPTION]... PATCH [FILE]...'))
+
+ _wrapcmd('qnew', mq.cmdtable, qnew, _("interactively record a new patch"))
+ _wrapcmd('qrefresh', mq.cmdtable, qrefresh,
+ _("interactively select changes to refresh"))
+
+def _wrapcmd(cmd, table, wrapfn, msg):
+ entry = extensions.wrapcommand(table, cmd, wrapfn)
+ entry[1].append(('i', 'interactive', None, msg))
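
To exercise the scanpatch()/parsepatch() pipeline in isolation: given a
small git-style diff, parsepatch() returns header objects whose .hunks
carry the parsed chunks. A sketch, assuming this module is importable as
hgext.record::

    import cStringIO
    from hgext import record

    SAMPLE = '\n'.join([
        'diff --git a/hello.txt b/hello.txt',
        '--- a/hello.txt',
        '+++ b/hello.txt',
        '@@ -1,2 +1,2 @@',
        ' unchanged line',
        '-old line',
        '+new line',
    ]) + '\n'

    for h in record.parsepatch(cStringIO.StringIO(SAMPLE)):
        print '%s: %d hunk(s)' % (h.filename(), len(h.hunks))
        for hk in h.hunks:
            # prints '  @1: +1 -1' for this sample
            print '  @%d: +%d -%d' % (hk.fromline, hk.added, hk.removed)
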
diff --git a/hgext/relink.py b/hgext/relink.py
new file mode 100644
index 0000000..f2e6bf1
--- /dev/null
+++ b/hgext/relink.py
@@ -0,0 +1,184 @@
+# Mercurial extension to provide 'hg relink' command
+#
+# Copyright (C) 2007 Brendan Cully <brendan@kublai.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""recreates hardlinks between repository clones"""
+
+from mercurial import hg, util
+from mercurial.i18n import _
+import os, stat
+
+testedwith = 'internal'
+
+def relink(ui, repo, origin=None, **opts):
+ """recreate hardlinks between two repositories
+
+ When repositories are cloned locally, their data files will be
+ hardlinked so that they only use the space of a single repository.
+
+ Unfortunately, subsequent pulls into either repository will break
+ hardlinks for any files touched by the new changesets, even if
+ both repositories end up pulling the same changes.
+
+ Similarly, passing --rev to "hg clone" will fail to use any
+ hardlinks, falling back to a complete copy of the source
+ repository.
+
+ This command lets you recreate those hardlinks and reclaim that
+ wasted space.
+
+ This repository will be relinked to share space with ORIGIN, which
+ must be on the same local disk. If ORIGIN is omitted, looks for
+ "default-relink", then "default", in [paths].
+
+ Do not attempt any read operations on this repository while the
+ command is running. (Both repositories will be locked against
+ writes.)
+ """
+ if (not util.safehasattr(util, 'samefile') or
+ not util.safehasattr(util, 'samedevice')):
+ raise util.Abort(_('hardlinks are not supported on this system'))
+ src = hg.repository(ui, ui.expandpath(origin or 'default-relink',
+ origin or 'default'))
+ ui.status(_('relinking %s to %s\n') % (src.store.path, repo.store.path))
+ if repo.root == src.root:
+ ui.status(_('there is nothing to relink\n'))
+ return
+
+ locallock = repo.lock()
+ try:
+ remotelock = src.lock()
+ try:
+ candidates = sorted(collect(src, ui))
+ targets = prune(candidates, src.store.path, repo.store.path, ui)
+ do_relink(src.store.path, repo.store.path, targets, ui)
+ finally:
+ remotelock.release()
+ finally:
+ locallock.release()
+
+def collect(src, ui):
+ seplen = len(os.path.sep)
+ candidates = []
+ live = len(src['tip'].manifest())
+ # Your average repository has some files which were deleted before
+ # the tip revision. We account for that by assuming that there are
+ # 3 tracked files for every 2 live files as of the tip version of
+ # the repository.
+ #
+ # mozilla-central as of 2010-06-10 had a ratio of just over 7:5.
+ total = live * 3 // 2
+ src = src.store.path
+ pos = 0
+ ui.status(_("tip has %d files, estimated total number of files: %s\n")
+ % (live, total))
+ for dirpath, dirnames, filenames in os.walk(src):
+ dirnames.sort()
+ relpath = dirpath[len(src) + seplen:]
+ for filename in sorted(filenames):
+ if filename[-2:] not in ('.d', '.i'):
+ continue
+ st = os.stat(os.path.join(dirpath, filename))
+ if not stat.S_ISREG(st.st_mode):
+ continue
+ pos += 1
+ candidates.append((os.path.join(relpath, filename), st))
+ ui.progress(_('collecting'), pos, filename, _('files'), total)
+
+ ui.progress(_('collecting'), None)
+ ui.status(_('collected %d candidate storage files\n') % len(candidates))
+ return candidates
+
+def prune(candidates, src, dst, ui):
+ def linkfilter(src, dst, st):
+ try:
+ ts = os.stat(dst)
+ except OSError:
+ # Destination doesn't have this file?
+ return False
+ if util.samefile(src, dst):
+ return False
+ if not util.samedevice(src, dst):
+ # No point in continuing
+ raise util.Abort(
+ _('source and destination are on different devices'))
+ if st.st_size != ts.st_size:
+ return False
+ return st
+
+ targets = []
+ total = len(candidates)
+ pos = 0
+ for fn, st in candidates:
+ pos += 1
+ srcpath = os.path.join(src, fn)
+ tgt = os.path.join(dst, fn)
+ ts = linkfilter(srcpath, tgt, st)
+ if not ts:
+ ui.debug('not linkable: %s\n' % fn)
+ continue
+ targets.append((fn, ts.st_size))
+ ui.progress(_('pruning'), pos, fn, _('files'), total)
+
+ ui.progress(_('pruning'), None)
+ ui.status(_('pruned down to %d probably relinkable files\n') % len(targets))
+ return targets
+
+def do_relink(src, dst, files, ui):
+ def relinkfile(src, dst):
+ bak = dst + '.bak'
+ os.rename(dst, bak)
+ try:
+ util.oslink(src, dst)
+ except OSError:
+ os.rename(bak, dst)
+ raise
+ os.remove(bak)
+
+ CHUNKLEN = 65536
+ relinked = 0
+ savedbytes = 0
+
+ pos = 0
+ total = len(files)
+ for f, sz in files:
+ pos += 1
+ source = os.path.join(src, f)
+ tgt = os.path.join(dst, f)
+ # Binary mode, so that read() works correctly, especially on Windows
+ sfp = file(source, 'rb')
+ dfp = file(tgt, 'rb')
+ sin = sfp.read(CHUNKLEN)
+ while sin:
+ din = dfp.read(CHUNKLEN)
+ if sin != din:
+ break
+ sin = sfp.read(CHUNKLEN)
+ sfp.close()
+ dfp.close()
+ if sin:
+ ui.debug('not linkable: %s\n' % f)
+ continue
+ try:
+ relinkfile(source, tgt)
+ ui.progress(_('relinking'), pos, f, _('files'), total)
+ relinked += 1
+ savedbytes += sz
+ except OSError, inst:
+ ui.warn('%s: %s\n' % (tgt, str(inst)))
+
+ ui.progress(_('relinking'), None)
+
+ ui.status(_('relinked %d files (%s reclaimed)\n') %
+ (relinked, util.bytecount(savedbytes)))
+
+cmdtable = {
+ 'relink': (
+ relink,
+ [],
+ _('[ORIGIN]')
+ )
+}
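
As a quick illustration of the comparison step above, here is a minimal
standalone sketch (not part of the patch; file paths are hypothetical) of
the chunked, binary-mode equality check that do_relink() performs before
replacing a target file with a hardlink::

    CHUNKLEN = 65536  # same 64 KiB chunk size as do_relink()

    def samecontent(path1, path2):
        # compare two files chunk by chunk, stopping at the first
        # difference instead of reading either file completely
        fp1 = open(path1, 'rb')
        fp2 = open(path2, 'rb')
        try:
            while True:
                c1 = fp1.read(CHUNKLEN)
                c2 = fp2.read(CHUNKLEN)
                if c1 != c2:
                    return False
                if not c1:
                    # both files exhausted at the same offset: identical
                    return True
        finally:
            fp1.close()
            fp2.close()

Only files whose contents compare equal are safe to replace with a
hardlink; the size check in prune() above is merely a cheap pre-filter.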
diff --git a/hgext/schemes.py b/hgext/schemes.py
new file mode 100644
index 0000000..51ab3ed
--- /dev/null
+++ b/hgext/schemes.py
@@ -0,0 +1,101 @@
+# Copyright 2009, Alexander Solovyov <piranha@piranha.org.ua>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""extend schemes with shortcuts to repository swarms
+
+This extension allows you to specify shortcuts for parent URLs hosting
+a lot of repositories, so that they act like a URL scheme, for example::
+
+ [schemes]
+ py = http://code.python.org/hg/
+
+After that you can use it like::
+
+ hg clone py://trunk/
+
+Additionally there is support for some more complex schemes, such as
+the one used by Google Code::
+
+ [schemes]
+ gcode = http://{1}.googlecode.com/hg/
+
+The syntax is taken from Mercurial templates, and you can use an
+unlimited number of variables, starting with ``{1}`` and continuing
+with ``{2}``, ``{3}`` and so on. These variables receive parts of the
+supplied URL, split by ``/``. Anything not specified as ``{part}`` is
+simply appended to the URL.
+
+For convenience, the extension adds these schemes by default::
+
+ [schemes]
+ py = http://hg.python.org/
+ bb = https://bitbucket.org/
+ bb+ssh = ssh://hg@bitbucket.org/
+ gcode = https://{1}.googlecode.com/hg/
+ kiln = https://{1}.kilnhg.com/Repo/
+
+You can override a predefined scheme by defining a new scheme with the
+same name.
+"""
+
+import os, re
+from mercurial import extensions, hg, templater, util
+from mercurial.i18n import _
+
+testedwith = 'internal'
+
+
+class ShortRepository(object):
+ def __init__(self, url, scheme, templater):
+ self.scheme = scheme
+ self.templater = templater
+ self.url = url
+ try:
+ self.parts = max(map(int, re.findall(r'\{(\d+)\}', self.url)))
+ except ValueError:
+ self.parts = 0
+
+ def __repr__(self):
+ return '<ShortRepository: %s>' % self.scheme
+
+ def instance(self, ui, url, create):
+ # Should this use urlmod.url(), or is manual parsing better?
+ url = url.split('://', 1)[1]
+ parts = url.split('/', self.parts)
+ if len(parts) > self.parts:
+ tail = parts[-1]
+ parts = parts[:-1]
+ else:
+ tail = ''
+ context = dict((str(i + 1), v) for i, v in enumerate(parts))
+ url = ''.join(self.templater.process(self.url, context)) + tail
+ return hg._peerlookup(url).instance(ui, url, create)
+
+def hasdriveletter(orig, path):
+ if path:
+ for scheme in schemes:
+ if path.startswith(scheme + ':'):
+ return False
+ return orig(path)
+
+schemes = {
+ 'py': 'http://hg.python.org/',
+ 'bb': 'https://bitbucket.org/',
+ 'bb+ssh': 'ssh://hg@bitbucket.org/',
+ 'gcode': 'https://{1}.googlecode.com/hg/',
+ 'kiln': 'https://{1}.kilnhg.com/Repo/'
+ }
+
+def extsetup(ui):
+ schemes.update(dict(ui.configitems('schemes')))
+ t = templater.engine(lambda x: x)
+ for scheme, url in schemes.items():
+ if (os.name == 'nt' and len(scheme) == 1 and scheme.isalpha()
+ and os.path.exists('%s:\\' % scheme)):
+ raise util.Abort(_('custom scheme %s:// conflicts with drive '
+ 'letter %s:\\\n') % (scheme, scheme.upper()))
+ hg.schemes[scheme] = ShortRepository(url, scheme, t)
+
+ extensions.wrapfunction(util, 'hasdriveletter', hasdriveletter)
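
To make the expansion concrete, here is a rough standalone sketch (not
part of the patch) of what ShortRepository.instance() does with a
templated scheme; it uses plain string substitution instead of
Mercurial's templater, and the example URL is hypothetical::

    import re

    def expand(template, url):
        # count of {N} variables in the template (0 if there are none)
        parts = max(map(int, re.findall(r'\{(\d+)\}', template) or ['0']))
        path = url.split('://', 1)[1]
        pieces = path.split('/', parts)
        if len(pieces) > parts:
            tail = pieces.pop()
        else:
            tail = ''
        for i, piece in enumerate(pieces):
            template = template.replace('{%d}' % (i + 1), piece)
        return template + tail

    print(expand('https://{1}.googlecode.com/hg/', 'gcode://myproject/wiki'))
    # https://myproject.googlecode.com/hg/wiki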
diff --git a/hgext/share.py b/hgext/share.py
new file mode 100644
index 0000000..fb11921
--- /dev/null
+++ b/hgext/share.py
@@ -0,0 +1,75 @@
+# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''share a common history between several working directories'''
+
+from mercurial.i18n import _
+from mercurial import hg, commands, util
+
+testedwith = 'internal'
+
+def share(ui, source, dest=None, noupdate=False):
+ """create a new shared repository
+
+ Initialize a new repository and working directory that shares its
+ history with another repository.
+
+ .. note::
+ using rollback or extensions that destroy/modify history (mq,
+ rebase, etc.) can cause considerable confusion with shared
+ clones. In particular, if two shared clones are both updated to
+ the same changeset, and one of them destroys that changeset
+ with rollback, the other clone will suddenly stop working: all
+ operations will fail with "abort: working directory has unknown
+ parent". The only known workaround is to use debugsetparents on
+ the broken clone to reset it to a changeset that still exists
+ (e.g. tip).
+ """
+
+ return hg.share(ui, source, dest, not noupdate)
+
+def unshare(ui, repo):
+ """convert a shared repository to a normal one
+
+ Copy the store data to the repo and remove the sharedpath data.
+ """
+
+ if repo.sharedpath == repo.path:
+ raise util.Abort(_("this is not a shared repo"))
+
+ destlock = lock = None
+ lock = repo.lock()
+ try:
+ # we use locks here because if we race with commit, we
+ # can end up with extra data in the cloned revlogs that's
+ # not pointed to by changesets, thus causing verify to
+ # fail
+
+ destlock = hg.copystore(ui, repo, repo.path)
+
+ sharefile = repo.join('sharedpath')
+ util.rename(sharefile, sharefile + '.old')
+
+ repo.requirements.discard('sharedpath')
+ repo._writerequirements()
+ finally:
+ destlock and destlock.release()
+ lock and lock.release()
+
+ # update store, spath, sopener and sjoin of repo
+ repo.__init__(ui, repo.root)
+
+cmdtable = {
+ "share":
+ (share,
+ [('U', 'noupdate', None, _('do not create a working copy'))],
+ _('[-U] SOURCE [DEST]')),
+ "unshare":
+ (unshare,
+ [],
+ ''),
+}
+
+commands.norepo += " share"
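
For illustration, a small sketch (not part of the patch; the path is
hypothetical) of what sharing amounts to on disk: the shared clone's
.hg/sharedpath file points at the source repository's .hg directory,
and unshare simply copies the store locally and removes that pointer::

    import os

    def storedir(repopath):
        # resolve the effective .hg directory of a possibly shared repo
        shared = os.path.join(repopath, '.hg', 'sharedpath')
        if os.path.exists(shared):
            # the file holds the path of the shared .hg directory
            return open(shared).read().rstrip('\n')
        return os.path.join(repopath, '.hg')

    print(storedir('/tmp/myclone'))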
diff --git a/hgext/transplant.py b/hgext/transplant.py
new file mode 100644
index 0000000..a506c0c
--- /dev/null
+++ b/hgext/transplant.py
@@ -0,0 +1,676 @@
+# Patch transplanting extension for Mercurial
+#
+# Copyright 2006, 2007 Brendan Cully <brendan@kublai.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''command to transplant changesets from another branch
+
+This extension allows you to transplant patches from another branch.
+
+Transplanted patches are recorded in .hg/transplant/transplants, as a
+map from a changeset hash to its hash in the source repository.
+'''
+
+from mercurial.i18n import _
+import os, tempfile
+from mercurial.node import short
+from mercurial import bundlerepo, hg, merge, match
+from mercurial import patch, revlog, scmutil, util, error, cmdutil
+from mercurial import revset, templatekw
+
+class TransplantError(error.Abort):
+ pass
+
+cmdtable = {}
+command = cmdutil.command(cmdtable)
+testedwith = 'internal'
+
+class transplantentry(object):
+ def __init__(self, lnode, rnode):
+ self.lnode = lnode
+ self.rnode = rnode
+
+class transplants(object):
+ def __init__(self, path=None, transplantfile=None, opener=None):
+ self.path = path
+ self.transplantfile = transplantfile
+ self.opener = opener
+
+ if not opener:
+ self.opener = scmutil.opener(self.path)
+ self.transplants = {}
+ self.dirty = False
+ self.read()
+
+ def read(self):
+ abspath = os.path.join(self.path, self.transplantfile)
+ if self.transplantfile and os.path.exists(abspath):
+ for line in self.opener.read(self.transplantfile).splitlines():
+ lnode, rnode = map(revlog.bin, line.split(':'))
+ list = self.transplants.setdefault(rnode, [])
+ list.append(transplantentry(lnode, rnode))
+
+ def write(self):
+ if self.dirty and self.transplantfile:
+ if not os.path.isdir(self.path):
+ os.mkdir(self.path)
+ fp = self.opener(self.transplantfile, 'w')
+ for list in self.transplants.itervalues():
+ for t in list:
+ l, r = map(revlog.hex, (t.lnode, t.rnode))
+ fp.write(l + ':' + r + '\n')
+ fp.close()
+ self.dirty = False
+
+ def get(self, rnode):
+ return self.transplants.get(rnode) or []
+
+ def set(self, lnode, rnode):
+ list = self.transplants.setdefault(rnode, [])
+ list.append(transplantentry(lnode, rnode))
+ self.dirty = True
+
+ def remove(self, transplant):
+ list = self.transplants.get(transplant.rnode)
+ if list:
+ del list[list.index(transplant)]
+ self.dirty = True
+
+class transplanter(object):
+ def __init__(self, ui, repo):
+ self.ui = ui
+ self.path = repo.join('transplant')
+ self.opener = scmutil.opener(self.path)
+ self.transplants = transplants(self.path, 'transplants',
+ opener=self.opener)
+ self.editor = None
+
+ def applied(self, repo, node, parent):
+        '''returns True if a node is already an ancestor of parent,
+        is parent itself, or has already been transplanted'''
+ if hasnode(repo, parent):
+ parentrev = repo.changelog.rev(parent)
+ if hasnode(repo, node):
+ rev = repo.changelog.rev(node)
+ reachable = repo.changelog.incancestors([parentrev], rev)
+ if rev in reachable:
+ return True
+ for t in self.transplants.get(node):
+ # it might have been stripped
+ if not hasnode(repo, t.lnode):
+ self.transplants.remove(t)
+ return False
+ lnoderev = repo.changelog.rev(t.lnode)
+ if lnoderev in repo.changelog.incancestors([parentrev], lnoderev):
+ return True
+ return False
+
+ def apply(self, repo, source, revmap, merges, opts={}):
+ '''apply the revisions in revmap one by one in revision order'''
+ revs = sorted(revmap)
+ p1, p2 = repo.dirstate.parents()
+ pulls = []
+ diffopts = patch.diffopts(self.ui, opts)
+ diffopts.git = True
+
+ lock = wlock = tr = None
+ try:
+ wlock = repo.wlock()
+ lock = repo.lock()
+ tr = repo.transaction('transplant')
+ for rev in revs:
+ node = revmap[rev]
+ revstr = '%s:%s' % (rev, short(node))
+
+ if self.applied(repo, node, p1):
+ self.ui.warn(_('skipping already applied revision %s\n') %
+ revstr)
+ continue
+
+ parents = source.changelog.parents(node)
+ if not (opts.get('filter') or opts.get('log')):
+ # If the changeset parent is the same as the
+ # wdir's parent, just pull it.
+ if parents[0] == p1:
+ pulls.append(node)
+ p1 = node
+ continue
+ if pulls:
+ if source != repo:
+ repo.pull(source.peer(), heads=pulls)
+ merge.update(repo, pulls[-1], False, False, None)
+ p1, p2 = repo.dirstate.parents()
+ pulls = []
+
+ domerge = False
+ if node in merges:
+                    # pulling all the merge revs at once would mean we
+                    # could not transplant after the latest merge even
+                    # if earlier transplants fail.
+ domerge = True
+ if not hasnode(repo, node):
+ repo.pull(source, heads=[node])
+
+ skipmerge = False
+ if parents[1] != revlog.nullid:
+ if not opts.get('parent'):
+ self.ui.note(_('skipping merge changeset %s:%s\n')
+ % (rev, short(node)))
+ skipmerge = True
+ else:
+ parent = source.lookup(opts['parent'])
+ if parent not in parents:
+ raise util.Abort(_('%s is not a parent of %s') %
+ (short(parent), short(node)))
+ else:
+ parent = parents[0]
+
+ if skipmerge:
+ patchfile = None
+ else:
+ fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
+ fp = os.fdopen(fd, 'w')
+ gen = patch.diff(source, parent, node, opts=diffopts)
+ for chunk in gen:
+ fp.write(chunk)
+ fp.close()
+
+ del revmap[rev]
+ if patchfile or domerge:
+ try:
+ try:
+ n = self.applyone(repo, node,
+ source.changelog.read(node),
+ patchfile, merge=domerge,
+ log=opts.get('log'),
+ filter=opts.get('filter'))
+ except TransplantError:
+ # Do not rollback, it is up to the user to
+ # fix the merge or cancel everything
+ tr.close()
+ raise
+ if n and domerge:
+ self.ui.status(_('%s merged at %s\n') % (revstr,
+ short(n)))
+ elif n:
+ self.ui.status(_('%s transplanted to %s\n')
+ % (short(node),
+ short(n)))
+ finally:
+ if patchfile:
+ os.unlink(patchfile)
+ tr.close()
+ if pulls:
+ repo.pull(source.peer(), heads=pulls)
+ merge.update(repo, pulls[-1], False, False, None)
+ finally:
+ self.saveseries(revmap, merges)
+ self.transplants.write()
+ if tr:
+ tr.release()
+ lock.release()
+ wlock.release()
+
+ def filter(self, filter, node, changelog, patchfile):
+ '''arbitrarily rewrite changeset before applying it'''
+
+ self.ui.status(_('filtering %s\n') % patchfile)
+ user, date, msg = (changelog[1], changelog[2], changelog[4])
+ fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-')
+ fp = os.fdopen(fd, 'w')
+ fp.write("# HG changeset patch\n")
+ fp.write("# User %s\n" % user)
+ fp.write("# Date %d %d\n" % date)
+ fp.write(msg + '\n')
+ fp.close()
+
+ try:
+ util.system('%s %s %s' % (filter, util.shellquote(headerfile),
+ util.shellquote(patchfile)),
+ environ={'HGUSER': changelog[1],
+ 'HGREVISION': revlog.hex(node),
+ },
+ onerr=util.Abort, errprefix=_('filter failed'),
+ out=self.ui.fout)
+ user, date, msg = self.parselog(file(headerfile))[1:4]
+ finally:
+ os.unlink(headerfile)
+
+ return (user, date, msg)
+
+ def applyone(self, repo, node, cl, patchfile, merge=False, log=False,
+ filter=None):
+ '''apply the patch in patchfile to the repository as a transplant'''
+ (manifest, user, (time, timezone), files, message) = cl[:5]
+ date = "%d %d" % (time, timezone)
+ extra = {'transplant_source': node}
+ if filter:
+ (user, date, message) = self.filter(filter, node, cl, patchfile)
+
+ if log:
+ # we don't translate messages inserted into commits
+ message += '\n(transplanted from %s)' % revlog.hex(node)
+
+ self.ui.status(_('applying %s\n') % short(node))
+ self.ui.note('%s %s\n%s\n' % (user, date, message))
+
+ if not patchfile and not merge:
+ raise util.Abort(_('can only omit patchfile if merging'))
+ if patchfile:
+ try:
+ files = set()
+ patch.patch(self.ui, repo, patchfile, files=files, eolmode=None)
+ files = list(files)
+ except Exception, inst:
+ seriespath = os.path.join(self.path, 'series')
+ if os.path.exists(seriespath):
+ os.unlink(seriespath)
+ p1 = repo.dirstate.p1()
+ p2 = node
+ self.log(user, date, message, p1, p2, merge=merge)
+ self.ui.write(str(inst) + '\n')
+ raise TransplantError(_('fix up the merge and run '
+ 'hg transplant --continue'))
+ else:
+ files = None
+ if merge:
+ p1, p2 = repo.dirstate.parents()
+ repo.setparents(p1, node)
+ m = match.always(repo.root, '')
+ else:
+ m = match.exact(repo.root, '', files)
+
+ n = repo.commit(message, user, date, extra=extra, match=m,
+ editor=self.editor)
+ if not n:
+ self.ui.warn(_('skipping emptied changeset %s\n') % short(node))
+ return None
+ if not merge:
+ self.transplants.set(n, node)
+
+ return n
+
+ def resume(self, repo, source, opts=None):
+ '''recover last transaction and apply remaining changesets'''
+ if os.path.exists(os.path.join(self.path, 'journal')):
+            n, node = self.recover(repo, source, opts)
+ self.ui.status(_('%s transplanted as %s\n') % (short(node),
+ short(n)))
+ seriespath = os.path.join(self.path, 'series')
+ if not os.path.exists(seriespath):
+ self.transplants.write()
+ return
+ nodes, merges = self.readseries()
+ revmap = {}
+ for n in nodes:
+ revmap[source.changelog.rev(n)] = n
+ os.unlink(seriespath)
+
+ self.apply(repo, source, revmap, merges, opts)
+
+    def recover(self, repo, source, opts=None):
+ '''commit working directory using journal metadata'''
+ node, user, date, message, parents = self.readlog()
+ merge = False
+
+ if not user or not date or not message or not parents[0]:
+ raise util.Abort(_('transplant log file is corrupt'))
+
+ parent = parents[0]
+ if len(parents) > 1:
+            if opts and opts.get('parent'):
+ parent = source.lookup(opts['parent'])
+ if parent not in parents:
+ raise util.Abort(_('%s is not a parent of %s') %
+ (short(parent), short(node)))
+ else:
+ merge = True
+
+ extra = {'transplant_source': node}
+ wlock = repo.wlock()
+ try:
+ p1, p2 = repo.dirstate.parents()
+ if p1 != parent:
+ raise util.Abort(
+ _('working dir not at transplant parent %s') %
+ revlog.hex(parent))
+ if merge:
+ repo.setparents(p1, parents[1])
+ n = repo.commit(message, user, date, extra=extra,
+ editor=self.editor)
+ if not n:
+ raise util.Abort(_('commit failed'))
+ if not merge:
+ self.transplants.set(n, node)
+ self.unlog()
+
+ return n, node
+ finally:
+ wlock.release()
+
+ def readseries(self):
+ nodes = []
+ merges = []
+ cur = nodes
+ for line in self.opener.read('series').splitlines():
+ if line.startswith('# Merges'):
+ cur = merges
+ continue
+ cur.append(revlog.bin(line))
+
+ return (nodes, merges)
+
+ def saveseries(self, revmap, merges):
+ if not revmap:
+ return
+
+ if not os.path.isdir(self.path):
+ os.mkdir(self.path)
+ series = self.opener('series', 'w')
+ for rev in sorted(revmap):
+ series.write(revlog.hex(revmap[rev]) + '\n')
+ if merges:
+ series.write('# Merges\n')
+ for m in merges:
+ series.write(revlog.hex(m) + '\n')
+ series.close()
+
+ def parselog(self, fp):
+ parents = []
+ message = []
+ node = revlog.nullid
+ inmsg = False
+ user = None
+ date = None
+ for line in fp.read().splitlines():
+ if inmsg:
+ message.append(line)
+ elif line.startswith('# User '):
+ user = line[7:]
+ elif line.startswith('# Date '):
+ date = line[7:]
+ elif line.startswith('# Node ID '):
+ node = revlog.bin(line[10:])
+ elif line.startswith('# Parent '):
+ parents.append(revlog.bin(line[9:]))
+ elif not line.startswith('# '):
+ inmsg = True
+ message.append(line)
+ if None in (user, date):
+ raise util.Abort(_("filter corrupted changeset (no user or date)"))
+ return (node, user, date, '\n'.join(message), parents)
+
+ def log(self, user, date, message, p1, p2, merge=False):
+ '''journal changelog metadata for later recover'''
+
+ if not os.path.isdir(self.path):
+ os.mkdir(self.path)
+ fp = self.opener('journal', 'w')
+ fp.write('# User %s\n' % user)
+ fp.write('# Date %s\n' % date)
+ fp.write('# Node ID %s\n' % revlog.hex(p2))
+ fp.write('# Parent ' + revlog.hex(p1) + '\n')
+ if merge:
+ fp.write('# Parent ' + revlog.hex(p2) + '\n')
+ fp.write(message.rstrip() + '\n')
+ fp.close()
+
+ def readlog(self):
+ return self.parselog(self.opener('journal'))
+
+ def unlog(self):
+ '''remove changelog journal'''
+ absdst = os.path.join(self.path, 'journal')
+ if os.path.exists(absdst):
+ os.unlink(absdst)
+
+ def transplantfilter(self, repo, source, root):
+ def matchfn(node):
+ if self.applied(repo, node, root):
+ return False
+ if source.changelog.parents(node)[1] != revlog.nullid:
+ return False
+ extra = source.changelog.read(node)[5]
+ cnode = extra.get('transplant_source')
+ if cnode and self.applied(repo, cnode, root):
+ return False
+ return True
+
+ return matchfn
+
+def hasnode(repo, node):
+ try:
+ return repo.changelog.rev(node) is not None
+ except error.RevlogError:
+ return False
+
+def browserevs(ui, repo, nodes, opts):
+ '''interactively transplant changesets'''
+ def browsehelp(ui):
+ ui.write(_('y: transplant this changeset\n'
+ 'n: skip this changeset\n'
+ 'm: merge at this changeset\n'
+ 'p: show patch\n'
+ 'c: commit selected changesets\n'
+ 'q: cancel transplant\n'
+ '?: show this help\n'))
+
+ displayer = cmdutil.show_changeset(ui, repo, opts)
+ transplants = []
+ merges = []
+ for node in nodes:
+ displayer.show(repo[node])
+ action = None
+ while not action:
+ action = ui.prompt(_('apply changeset? [ynmpcq?]:'))
+ if action == '?':
+ browsehelp(ui)
+ action = None
+ elif action == 'p':
+ parent = repo.changelog.parents(node)[0]
+ for chunk in patch.diff(repo, parent, node):
+ ui.write(chunk)
+ action = None
+ elif action not in ('y', 'n', 'm', 'c', 'q'):
+ ui.write(_('no such option\n'))
+ action = None
+ if action == 'y':
+ transplants.append(node)
+ elif action == 'm':
+ merges.append(node)
+ elif action == 'c':
+ break
+ elif action == 'q':
+ transplants = ()
+ merges = ()
+ break
+ displayer.close()
+ return (transplants, merges)
+
+@command('transplant',
+ [('s', 'source', '', _('pull patches from REPO'), _('REPO')),
+ ('b', 'branch', [],
+ _('pull patches from branch BRANCH'), _('BRANCH')),
+ ('a', 'all', None, _('pull all changesets up to BRANCH')),
+ ('p', 'prune', [], _('skip over REV'), _('REV')),
+ ('m', 'merge', [], _('merge at REV'), _('REV')),
+ ('', 'parent', '',
+ _('parent to choose when transplanting merge'), _('REV')),
+ ('e', 'edit', False, _('invoke editor on commit messages')),
+ ('', 'log', None, _('append transplant info to log message')),
+ ('c', 'continue', None, _('continue last transplant session '
+ 'after repair')),
+ ('', 'filter', '',
+ _('filter changesets through command'), _('CMD'))],
+ _('hg transplant [-s REPO] [-b BRANCH [-a]] [-p REV] '
+ '[-m REV] [REV]...'))
+def transplant(ui, repo, *revs, **opts):
+ '''transplant changesets from another branch
+
+ Selected changesets will be applied on top of the current working
+ directory with the log of the original changeset. The changesets
+ are copied and will thus appear twice in the history. Use the
+ rebase extension instead if you want to move a whole branch of
+ unpublished changesets.
+
+ If --log is specified, log messages will have a comment appended
+ of the form::
+
+ (transplanted from CHANGESETHASH)
+
+ You can rewrite the changelog message with the --filter option.
+ Its argument will be invoked with the current changelog message as
+ $1 and the patch as $2.
+
+ If --source/-s is specified, selects changesets from the named
+ repository. If --branch/-b is specified, selects changesets from
+ the branch holding the named revision, up to that revision. If
+ --all/-a is specified, all changesets on the branch will be
+ transplanted, otherwise you will be prompted to select the
+ changesets you want.
+
+ :hg:`transplant --branch REV --all` will transplant the
+ selected branch (up to the named revision) onto your current
+ working directory.
+
+ You can optionally mark selected transplanted changesets as merge
+ changesets. You will not be prompted to transplant any ancestors
+ of a merged transplant, and you can merge descendants of them
+ normally instead of transplanting them.
+
+ Merge changesets may be transplanted directly by specifying the
+ proper parent changeset by calling :hg:`transplant --parent`.
+
+ If no merges or revisions are provided, :hg:`transplant` will
+ start an interactive changeset browser.
+
+ If a changeset application fails, you can fix the merge by hand
+ and then resume where you left off by calling :hg:`transplant
+ --continue/-c`.
+ '''
+ def incwalk(repo, csets, match=util.always):
+ for node in csets:
+ if match(node):
+ yield node
+
+ def transplantwalk(repo, root, branches, match=util.always):
+ if not branches:
+ branches = repo.heads()
+ ancestors = []
+ for branch in branches:
+ ancestors.append(repo.changelog.ancestor(root, branch))
+ for node in repo.changelog.nodesbetween(ancestors, branches)[0]:
+ if match(node):
+ yield node
+
+ def checkopts(opts, revs):
+ if opts.get('continue'):
+ if opts.get('branch') or opts.get('all') or opts.get('merge'):
+ raise util.Abort(_('--continue is incompatible with '
+ 'branch, all or merge'))
+ return
+ if not (opts.get('source') or revs or
+ opts.get('merge') or opts.get('branch')):
+ raise util.Abort(_('no source URL, branch tag or revision '
+ 'list provided'))
+ if opts.get('all'):
+ if not opts.get('branch'):
+ raise util.Abort(_('--all requires a branch revision'))
+ if revs:
+ raise util.Abort(_('--all is incompatible with a '
+ 'revision list'))
+
+ checkopts(opts, revs)
+
+ if not opts.get('log'):
+ opts['log'] = ui.config('transplant', 'log')
+ if not opts.get('filter'):
+ opts['filter'] = ui.config('transplant', 'filter')
+
+ tp = transplanter(ui, repo)
+ if opts.get('edit'):
+ tp.editor = cmdutil.commitforceeditor
+
+ p1, p2 = repo.dirstate.parents()
+ if len(repo) > 0 and p1 == revlog.nullid:
+ raise util.Abort(_('no revision checked out'))
+ if not opts.get('continue'):
+ if p2 != revlog.nullid:
+ raise util.Abort(_('outstanding uncommitted merges'))
+ m, a, r, d = repo.status()[:4]
+ if m or a or r or d:
+ raise util.Abort(_('outstanding local changes'))
+
+ sourcerepo = opts.get('source')
+ if sourcerepo:
+ peer = hg.peer(ui, opts, ui.expandpath(sourcerepo))
+ branches = map(peer.lookup, opts.get('branch', ()))
+ source, csets, cleanupfn = bundlerepo.getremotechanges(ui, repo, peer,
+ onlyheads=branches, force=True)
+ else:
+ source = repo
+ branches = map(source.lookup, opts.get('branch', ()))
+ cleanupfn = None
+
+ try:
+ if opts.get('continue'):
+ tp.resume(repo, source, opts)
+ return
+
+ tf = tp.transplantfilter(repo, source, p1)
+ if opts.get('prune'):
+ prune = [source.lookup(r)
+ for r in scmutil.revrange(source, opts.get('prune'))]
+ matchfn = lambda x: tf(x) and x not in prune
+ else:
+ matchfn = tf
+ merges = map(source.lookup, opts.get('merge', ()))
+ revmap = {}
+ if revs:
+ for r in scmutil.revrange(source, revs):
+ revmap[int(r)] = source.lookup(r)
+ elif opts.get('all') or not merges:
+ if source != repo:
+ alltransplants = incwalk(source, csets, match=matchfn)
+ else:
+ alltransplants = transplantwalk(source, p1, branches,
+ match=matchfn)
+ if opts.get('all'):
+ revs = alltransplants
+ else:
+ revs, newmerges = browserevs(ui, source, alltransplants, opts)
+ merges.extend(newmerges)
+ for r in revs:
+ revmap[source.changelog.rev(r)] = r
+ for r in merges:
+ revmap[source.changelog.rev(r)] = r
+
+ tp.apply(repo, source, revmap, merges, opts)
+ finally:
+ if cleanupfn:
+ cleanupfn()
+
+def revsettransplanted(repo, subset, x):
+ """``transplanted([set])``
+ Transplanted changesets in set, or all transplanted changesets.
+ """
+ if x:
+ s = revset.getset(repo, subset, x)
+ else:
+ s = subset
+ return [r for r in s if repo[r].extra().get('transplant_source')]
+
+def kwtransplanted(repo, ctx, **args):
+ """:transplanted: String. The node identifier of the transplanted
+ changeset if any."""
+ n = ctx.extra().get('transplant_source')
+ return n and revlog.hex(n) or ''
+
+def extsetup(ui):
+ revset.symbols['transplanted'] = revsettransplanted
+ templatekw.keywords['transplanted'] = kwtransplanted
+
+# tell hggettext to extract docstrings from these functions:
+i18nfunctions = [revsettransplanted, kwtransplanted]
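
The journal written by log() and read back by parselog() is a small
header-plus-message format. Here is a standalone sketch of a parser for
it (not part of the patch; the sample uses made-up hex values and works
on hex strings rather than binary nodes)::

    def parsejournal(text):
        user = date = node = None
        parents = []
        message = []
        inmsg = False
        for line in text.splitlines():
            if inmsg:
                message.append(line)
            elif line.startswith('# User '):
                user = line[7:]
            elif line.startswith('# Date '):
                date = line[7:]
            elif line.startswith('# Node ID '):
                node = line[10:]
            elif line.startswith('# Parent '):
                parents.append(line[9:])
            elif not line.startswith('# '):
                inmsg = True
                message.append(line)
        return user, date, node, parents, '\n'.join(message)

    sample = ('# User alice\n'
              '# Date 0 0\n'
              '# Node ID 00ff\n'
              '# Parent aa11\n'
              'transplanted change\n')
    print(parsejournal(sample))
    # ('alice', '0 0', '00ff', ['aa11'], 'transplanted change')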
diff --git a/hgext/win32mbcs.py b/hgext/win32mbcs.py
new file mode 100644
index 0000000..65f0854
--- /dev/null
+++ b/hgext/win32mbcs.py
@@ -0,0 +1,167 @@
+# win32mbcs.py -- MBCS filename support for Mercurial
+#
+# Copyright (c) 2008 Shun-ichi Goto <shunichi.goto@gmail.com>
+#
+# Version: 0.3
+# Author: Shun-ichi Goto <shunichi.goto@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+#
+
+'''allow the use of MBCS paths with problematic encodings
+
+Some MBCS encodings are not suitable for certain path operations (e.g.
+splitting paths, case conversion) on their encoded bytes. We call such
+encodings (e.g. shift_jis and big5) "problematic encodings". This
+extension can be used to fix the issue with those encodings by wrapping
+the relevant functions to convert byte strings to Unicode strings
+before performing path operations.
+
+This extension is useful for:
+
+- Japanese Windows users using shift_jis encoding.
+- Chinese Windows users using big5 encoding.
+- All users who use a repository with one of the problematic encodings
+  on a case-insensitive file system.
+
+This extension is not needed for:
+
+- Any user who uses only ASCII characters in paths.
+- Any user who does not use any of the problematic encodings.
+
+Note that there are some limitations on using this extension:
+
+- You should use a single encoding in one repository.
+- If the repository path ends with 0x5c, .hg/hgrc cannot be read.
+- win32mbcs is not compatible with fixutf8 extension.
+
+By default, win32mbcs uses encoding.encoding as determined by
+Mercurial. You can specify the encoding with a config option::
+
+ [win32mbcs]
+ encoding = sjis
+
+This is useful for users who want to commit with UTF-8 log messages.
+'''
+
+import os, sys
+from mercurial.i18n import _
+from mercurial import util, encoding
+testedwith = 'internal'
+
+_encoding = None # see extsetup
+
+def decode(arg):
+ if isinstance(arg, str):
+ uarg = arg.decode(_encoding)
+ if arg == uarg.encode(_encoding):
+ return uarg
+ raise UnicodeError("Not local encoding")
+ elif isinstance(arg, tuple):
+ return tuple(map(decode, arg))
+ elif isinstance(arg, list):
+ return map(decode, arg)
+ elif isinstance(arg, dict):
+ for k, v in arg.items():
+ arg[k] = decode(v)
+ return arg
+
+def encode(arg):
+ if isinstance(arg, unicode):
+ return arg.encode(_encoding)
+ elif isinstance(arg, tuple):
+ return tuple(map(encode, arg))
+ elif isinstance(arg, list):
+ return map(encode, arg)
+ elif isinstance(arg, dict):
+ for k, v in arg.items():
+ arg[k] = encode(v)
+ return arg
+
+def appendsep(s):
+ # ensure the path ends with os.sep, appending it if necessary.
+ try:
+ us = decode(s)
+ except UnicodeError:
+ us = s
+ if us and us[-1] not in ':/\\':
+ s += os.sep
+ return s
+
+def wrapper(func, args, kwds):
+ # check argument is unicode, then call original
+ for arg in args:
+ if isinstance(arg, unicode):
+ return func(*args, **kwds)
+
+ try:
+ # convert arguments to unicode, call func, then convert back
+ return encode(func(*decode(args), **decode(kwds)))
+ except UnicodeError:
+ raise util.Abort(_("[win32mbcs] filename conversion failed with"
+ " %s encoding\n") % (_encoding))
+
+def wrapperforlistdir(func, args, kwds):
+    # Ensure the 'path' argument ends with os.sep, to avoid
+    # misinterpreting a trailing 0x5c of an MBCS second byte as a
+    # path separator.
+ if args:
+ args = list(args)
+ args[0] = appendsep(args[0])
+ if 'path' in kwds:
+ kwds['path'] = appendsep(kwds['path'])
+ return func(*args, **kwds)
+
+def wrapname(name, wrapper):
+ module, name = name.rsplit('.', 1)
+ module = sys.modules[module]
+ func = getattr(module, name)
+ def f(*args, **kwds):
+ return wrapper(func, args, kwds)
+ try:
+        f.__name__ = func.__name__ # fails with Python 2.3
+ except Exception:
+ pass
+ setattr(module, name, f)
+
+# List of functions to be wrapped.
+# NOTE: os.path.dirname() and os.path.basename() are safe because
+# they use result of os.path.split()
+funcs = '''os.path.join os.path.split os.path.splitext
+ os.path.normpath os.makedirs
+ mercurial.util.endswithsep mercurial.util.splitpath mercurial.util.checkcase
+ mercurial.util.fspath mercurial.util.pconvert mercurial.util.normpath
+ mercurial.util.checkwinfilename mercurial.util.checkosfilename'''
+
+# List of Windows specific functions to be wrapped.
+winfuncs = '''os.path.splitunc'''
+
+# codec and alias names of sjis and big5 to be faked.
+problematic_encodings = '''big5 big5-tw csbig5 big5hkscs big5-hkscs
+ hkscs cp932 932 ms932 mskanji ms-kanji shift_jis csshiftjis shiftjis
+ sjis s_jis shift_jis_2004 shiftjis2004 sjis_2004 sjis2004
+ shift_jisx0213 shiftjisx0213 sjisx0213 s_jisx0213 950 cp950 ms950 '''
+
+def extsetup(ui):
+ # TODO: decide use of config section for this extension
+ if ((not os.path.supports_unicode_filenames) and
+ (sys.platform != 'cygwin')):
+ ui.warn(_("[win32mbcs] cannot activate on this platform.\n"))
+ return
+ # determine encoding for filename
+ global _encoding
+ _encoding = ui.config('win32mbcs', 'encoding', encoding.encoding)
+ # fake is only for relevant environment.
+ if _encoding.lower() in problematic_encodings.split():
+ for f in funcs.split():
+ wrapname(f, wrapper)
+ if os.name == 'nt':
+ for f in winfuncs.split():
+ wrapname(f, wrapper)
+ wrapname("mercurial.osutil.listdir", wrapperforlistdir)
+        # Check sys.argv manually instead of using ui.debug() because
+        # command line options are not yet applied when
+        # extensions.loadall() is called.
+ if '--debug' in sys.argv:
+ ui.write("[win32mbcs] activated with encoding: %s\n"
+ % _encoding)
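
The heart of the extension is the decode/call/encode round trip in
wrapper() above. Here is a minimal sketch of the same pattern applied
to a single function (not part of the patch; the encoding choice is
just an example)::

    import os.path

    ENC = 'shift_jis'  # one of the problematic encodings

    def mbcswrap(func):
        def wrapped(*args):
            # decode byte-string arguments to unicode, call the
            # original, then encode any unicode result back
            uargs = [a.decode(ENC) if isinstance(a, str) else a
                     for a in args]
            result = func(*uargs)
            if isinstance(result, unicode):
                return result.encode(ENC)
            return result
        return wrapped

    join = mbcswrap(os.path.join)

Because the wrapped function now operates on unicode, a second byte of
0x5c in an MBCS character can no longer be mistaken for the path
separator.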
diff --git a/hgext/win32text.py b/hgext/win32text.py
new file mode 100644
index 0000000..a26c997
--- /dev/null
+++ b/hgext/win32text.py
@@ -0,0 +1,172 @@
+# win32text.py - LF <-> CRLF/CR translation utilities for Windows/Mac users
+#
+# Copyright 2005, 2007-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''perform automatic newline conversion
+
+  Deprecation: The win32text extension requires each user to configure
+  the extension again and again for each clone since the configuration
+  is not copied when cloning.
+
+  We have therefore made the ``eol`` extension as an alternative. The
+  ``eol`` extension uses a version-controlled file for its configuration
+  and each clone will therefore use the right settings from the start.
+
+To perform automatic newline conversion, use::
+
+ [extensions]
+ win32text =
+ [encode]
+ ** = cleverencode:
+ # or ** = macencode:
+
+ [decode]
+ ** = cleverdecode:
+ # or ** = macdecode:
+
+If you are not doing conversion, use the following to make sure you do
+not commit CRLF/CR by accident::
+
+ [hooks]
+ pretxncommit.crlf = python:hgext.win32text.forbidcrlf
+ # or pretxncommit.cr = python:hgext.win32text.forbidcr
+
+To do the same check on a server to prevent CRLF/CR from being
+pushed or pulled::
+
+ [hooks]
+ pretxnchangegroup.crlf = python:hgext.win32text.forbidcrlf
+ # or pretxnchangegroup.cr = python:hgext.win32text.forbidcr
+'''
+
+from mercurial.i18n import _
+from mercurial.node import short
+from mercurial import util
+import re
+
+testedwith = 'internal'
+
+# regexp for single LF without CR preceding.
+re_single_lf = re.compile('(^|[^\r])\n', re.MULTILINE)
+
+newlinestr = {'\r\n': 'CRLF', '\r': 'CR'}
+filterstr = {'\r\n': 'clever', '\r': 'mac'}
+
+def checknewline(s, newline, ui=None, repo=None, filename=None):
+ # warn if already has 'newline' in repository.
+ # it might cause unexpected eol conversion.
+ # see issue 302:
+ # http://mercurial.selenic.com/bts/issue302
+ if newline in s and ui and filename and repo:
+ ui.warn(_('WARNING: %s already has %s line endings\n'
+ 'and does not need EOL conversion by the win32text plugin.\n'
+ 'Before your next commit, please reconsider your '
+ 'encode/decode settings in \nMercurial.ini or %s.\n') %
+ (filename, newlinestr[newline], repo.join('hgrc')))
+
+def dumbdecode(s, cmd, **kwargs):
+ checknewline(s, '\r\n', **kwargs)
+ # replace single LF to CRLF
+ return re_single_lf.sub('\\1\r\n', s)
+
+def dumbencode(s, cmd):
+ return s.replace('\r\n', '\n')
+
+def macdumbdecode(s, cmd, **kwargs):
+ checknewline(s, '\r', **kwargs)
+ return s.replace('\n', '\r')
+
+def macdumbencode(s, cmd):
+ return s.replace('\r', '\n')
+
+def cleverdecode(s, cmd, **kwargs):
+ if not util.binary(s):
+ return dumbdecode(s, cmd, **kwargs)
+ return s
+
+def cleverencode(s, cmd):
+ if not util.binary(s):
+ return dumbencode(s, cmd)
+ return s
+
+def macdecode(s, cmd, **kwargs):
+ if not util.binary(s):
+ return macdumbdecode(s, cmd, **kwargs)
+ return s
+
+def macencode(s, cmd):
+ if not util.binary(s):
+ return macdumbencode(s, cmd)
+ return s
+
+_filters = {
+ 'dumbdecode:': dumbdecode,
+ 'dumbencode:': dumbencode,
+ 'cleverdecode:': cleverdecode,
+ 'cleverencode:': cleverencode,
+ 'macdumbdecode:': macdumbdecode,
+ 'macdumbencode:': macdumbencode,
+ 'macdecode:': macdecode,
+ 'macencode:': macencode,
+ }
+
+def forbidnewline(ui, repo, hooktype, node, newline, **kwargs):
+ halt = False
+ seen = set()
+ # we try to walk changesets in reverse order from newest to
+ # oldest, so that if we see a file multiple times, we take the
+ # newest version as canonical. this prevents us from blocking a
+ # changegroup that contains an unacceptable commit followed later
+ # by a commit that fixes the problem.
+ tip = repo['tip']
+ for rev in xrange(len(repo)-1, repo[node].rev()-1, -1):
+ c = repo[rev]
+ for f in c.files():
+ if f in seen or f not in tip or f not in c:
+ continue
+ seen.add(f)
+ data = c[f].data()
+ if not util.binary(data) and newline in data:
+ if not halt:
+ ui.warn(_('attempt to commit or push text file(s) '
+ 'using %s line endings\n') %
+ newlinestr[newline])
+ ui.warn(_('in %s: %s\n') % (short(c.node()), f))
+ halt = True
+ if halt and hooktype == 'pretxnchangegroup':
+ crlf = newlinestr[newline].lower()
+ filter = filterstr[newline]
+ ui.warn(_('\nTo prevent this mistake in your local repository,\n'
+ 'add to Mercurial.ini or .hg/hgrc:\n'
+ '\n'
+ '[hooks]\n'
+ 'pretxncommit.%s = python:hgext.win32text.forbid%s\n'
+ '\n'
+ 'and also consider adding:\n'
+ '\n'
+ '[extensions]\n'
+ 'win32text =\n'
+ '[encode]\n'
+ '** = %sencode:\n'
+ '[decode]\n'
+ '** = %sdecode:\n') % (crlf, crlf, filter, filter))
+ return halt
+
+def forbidcrlf(ui, repo, hooktype, node, **kwargs):
+ return forbidnewline(ui, repo, hooktype, node, '\r\n', **kwargs)
+
+def forbidcr(ui, repo, hooktype, node, **kwargs):
+ return forbidnewline(ui, repo, hooktype, node, '\r', **kwargs)
+
+def reposetup(ui, repo):
+ if not repo.local():
+ return
+ for name, fn in _filters.iteritems():
+ repo.adddatafilter(name, fn)
+
+def extsetup(ui):
+ if ui.configbool('win32text', 'warn', True):
+ ui.warn(_("win32text is deprecated: "
+ "http://mercurial.selenic.com/wiki/Win32TextExtension\n"))
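
A short self-contained demonstration of the conversion pair (not part
of the patch; the sample string is made up): lone LFs gain a CR on
decode, existing CRLFs are left alone, and encode strips the CRs
again::

    import re

    re_single_lf = re.compile('(^|[^\r])\n', re.MULTILINE)

    def to_crlf(s):
        # dumbdecode: LF not preceded by CR becomes CRLF
        return re_single_lf.sub('\\1\r\n', s)

    def to_lf(s):
        # dumbencode: normalize CRLF back to LF
        return s.replace('\r\n', '\n')

    sample = 'one\ntwo\r\nthree\n'
    assert to_crlf(sample) == 'one\r\ntwo\r\nthree\r\n'
    assert to_lf(to_crlf(sample)) == 'one\ntwo\nthree\n'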
diff --git a/hgext/zeroconf/Zeroconf.py b/hgext/zeroconf/Zeroconf.py
new file mode 100644
index 0000000..e8dfa14
--- /dev/null
+++ b/hgext/zeroconf/Zeroconf.py
@@ -0,0 +1,1582 @@
+""" Multicast DNS Service Discovery for Python, v0.12
+ Copyright (C) 2003, Paul Scott-Murphy
+
+ This module provides a framework for the use of DNS Service Discovery
+ using IP multicast. It has been tested against the JRendezvous
+ implementation from <a href="http://strangeberry.com">StrangeBerry</a>,
+ and against the mDNSResponder from Mac OS X 10.3.8.
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see
+ <http://www.gnu.org/licenses/>.
+
+"""
+
+"""0.12 update - allow selection of binding interface
+ typo fix - Thanks A. M. Kuchlingi
+ removed all use of word 'Rendezvous' - this is an API change"""
+
+"""0.11 update - correction to comments for addListener method
+ support for new record types seen from OS X
+ - IPv6 address
+ - hostinfo
+ ignore unknown DNS record types
+ fixes to name decoding
+ works alongside other processes using port 5353 (e.g. on Mac OS X)
+ tested against Mac OS X 10.3.2's mDNSResponder
+ corrections to removal of list entries for service browser"""
+
+"""0.10 update - Jonathon Paisley contributed these corrections:
+ always multicast replies, even when query is unicast
+ correct a pointer encoding problem
+ can now write records in any order
+ traceback shown on failure
+ better TXT record parsing
+ server is now separate from name
+ can cancel a service browser
+
+ modified some unit tests to accommodate these changes"""
+
+"""0.09 update - remove all records on service unregistration
+ fix DOS security problem with readName"""
+
+"""0.08 update - changed licensing to LGPL"""
+
+"""0.07 update - faster shutdown on engine
+ pointer encoding of outgoing names
+ ServiceBrowser now works
+ new unit tests"""
+
+"""0.06 update - small improvements with unit tests
+ added defined exception types
+ new style objects
+ fixed hostname/interface problem
+ fixed socket timeout problem
+ fixed addServiceListener() typo bug
+ using select() for socket reads
+ tested on Debian unstable with Python 2.2.2"""
+
+"""0.05 update - ensure case insensitivity on domain names
+ support for unicast DNS queries"""
+
+"""0.04 update - added some unit tests
+ added __ne__ adjuncts where required
+ ensure names end in '.local.'
+ timeout on receiving socket for clean shutdown"""
+
+__author__ = "Paul Scott-Murphy"
+__email__ = "paul at scott dash murphy dot com"
+__version__ = "0.12"
+
+import string
+import time
+import struct
+import socket
+import threading
+import select
+import traceback
+
+__all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"]
+
+# hook for threads
+
+globals()['_GLOBAL_DONE'] = 0
+
+# Some timing constants
+
+_UNREGISTER_TIME = 125
+_CHECK_TIME = 175
+_REGISTER_TIME = 225
+_LISTENER_TIME = 200
+_BROWSER_TIME = 500
+
+# Some DNS constants
+
+_MDNS_ADDR = '224.0.0.251'
+_MDNS_PORT = 5353
+_DNS_PORT = 53
+_DNS_TTL = 60 * 60 # one hour default TTL
+
+_MAX_MSG_TYPICAL = 1460 # unused
+_MAX_MSG_ABSOLUTE = 8972
+
+_FLAGS_QR_MASK = 0x8000 # query response mask
+_FLAGS_QR_QUERY = 0x0000 # query
+_FLAGS_QR_RESPONSE = 0x8000 # response
+
+_FLAGS_AA = 0x0400 # Authoritative answer
+_FLAGS_TC = 0x0200 # Truncated
+_FLAGS_RD = 0x0100 # Recursion desired
+_FLAGS_RA = 0x0080 # Recursion available
+
+_FLAGS_Z = 0x0040 # Zero
+_FLAGS_AD = 0x0020 # Authentic data
+_FLAGS_CD = 0x0010 # Checking disabled
+
+_CLASS_IN = 1
+_CLASS_CS = 2
+_CLASS_CH = 3
+_CLASS_HS = 4
+_CLASS_NONE = 254
+_CLASS_ANY = 255
+_CLASS_MASK = 0x7FFF
+_CLASS_UNIQUE = 0x8000
+
+_TYPE_A = 1
+_TYPE_NS = 2
+_TYPE_MD = 3
+_TYPE_MF = 4
+_TYPE_CNAME = 5
+_TYPE_SOA = 6
+_TYPE_MB = 7
+_TYPE_MG = 8
+_TYPE_MR = 9
+_TYPE_NULL = 10
+_TYPE_WKS = 11
+_TYPE_PTR = 12
+_TYPE_HINFO = 13
+_TYPE_MINFO = 14
+_TYPE_MX = 15
+_TYPE_TXT = 16
+_TYPE_AAAA = 28
+_TYPE_SRV = 33
+_TYPE_ANY = 255
+
+# Mapping constants to names
+
+_CLASSES = { _CLASS_IN : "in",
+ _CLASS_CS : "cs",
+ _CLASS_CH : "ch",
+ _CLASS_HS : "hs",
+ _CLASS_NONE : "none",
+ _CLASS_ANY : "any" }
+
+_TYPES = { _TYPE_A : "a",
+ _TYPE_NS : "ns",
+ _TYPE_MD : "md",
+ _TYPE_MF : "mf",
+ _TYPE_CNAME : "cname",
+ _TYPE_SOA : "soa",
+ _TYPE_MB : "mb",
+ _TYPE_MG : "mg",
+ _TYPE_MR : "mr",
+ _TYPE_NULL : "null",
+ _TYPE_WKS : "wks",
+ _TYPE_PTR : "ptr",
+ _TYPE_HINFO : "hinfo",
+ _TYPE_MINFO : "minfo",
+ _TYPE_MX : "mx",
+ _TYPE_TXT : "txt",
+ _TYPE_AAAA : "quada",
+ _TYPE_SRV : "srv",
+ _TYPE_ANY : "any" }
+
+# utility functions
+
+def currentTimeMillis():
+ """Current system time in milliseconds"""
+ return time.time() * 1000
+
+# Exceptions
+
+class NonLocalNameException(Exception):
+ pass
+
+class NonUniqueNameException(Exception):
+ pass
+
+class NamePartTooLongException(Exception):
+ pass
+
+class AbstractMethodException(Exception):
+ pass
+
+class BadTypeInNameException(Exception):
+ pass
+
+class BadDomainName(Exception):
+ def __init__(self, pos):
+ Exception.__init__(self, "at position %s" % pos)
+
+class BadDomainNameCircular(BadDomainName):
+ pass
+
+# implementation classes
+
+class DNSEntry(object):
+ """A DNS entry"""
+
+ def __init__(self, name, type, clazz):
+ self.key = string.lower(name)
+ self.name = name
+ self.type = type
+ self.clazz = clazz & _CLASS_MASK
+ self.unique = (clazz & _CLASS_UNIQUE) != 0
+
+ def __eq__(self, other):
+ """Equality test on name, type, and class"""
+ if isinstance(other, DNSEntry):
+ return self.name == other.name and self.type == other.type and self.clazz == other.clazz
+ return 0
+
+ def __ne__(self, other):
+ """Non-equality test"""
+ return not self.__eq__(other)
+
+ def getClazz(self, clazz):
+ """Class accessor"""
+ try:
+ return _CLASSES[clazz]
+ except KeyError:
+ return "?(%s)" % (clazz)
+
+ def getType(self, type):
+ """Type accessor"""
+ try:
+ return _TYPES[type]
+ except KeyError:
+ return "?(%s)" % (type)
+
+ def toString(self, hdr, other):
+ """String representation with additional information"""
+ result = "%s[%s,%s" % (hdr, self.getType(self.type), self.getClazz(self.clazz))
+ if self.unique:
+ result += "-unique,"
+ else:
+ result += ","
+ result += self.name
+ if other is not None:
+ result += ",%s]" % (other)
+ else:
+ result += "]"
+ return result
+
+class DNSQuestion(DNSEntry):
+ """A DNS question entry"""
+
+ def __init__(self, name, type, clazz):
+ if not name.endswith(".local."):
+ raise NonLocalNameException(name)
+ DNSEntry.__init__(self, name, type, clazz)
+
+ def answeredBy(self, rec):
+ """Returns true if the question is answered by the record"""
+ return self.clazz == rec.clazz and (self.type == rec.type or self.type == _TYPE_ANY) and self.name == rec.name
+
+ def __repr__(self):
+ """String representation"""
+ return DNSEntry.toString(self, "question", None)
+
+
+class DNSRecord(DNSEntry):
+ """A DNS record - like a DNS entry, but has a TTL"""
+
+ def __init__(self, name, type, clazz, ttl):
+ DNSEntry.__init__(self, name, type, clazz)
+ self.ttl = ttl
+ self.created = currentTimeMillis()
+
+ def __eq__(self, other):
+ """Tests equality as per DNSRecord"""
+ if isinstance(other, DNSRecord):
+ return DNSEntry.__eq__(self, other)
+ return 0
+
+ def suppressedBy(self, msg):
+ """Returns true if any answer in a message can suffice for the
+ information held in this record."""
+ for record in msg.answers:
+ if self.suppressedByAnswer(record):
+ return 1
+ return 0
+
+ def suppressedByAnswer(self, other):
+ """Returns true if another record has same name, type and class,
+ and if its TTL is at least half of this record's."""
+ if self == other and other.ttl > (self.ttl / 2):
+ return 1
+ return 0
+
+ def getExpirationTime(self, percent):
+ """Returns the time at which this record will have expired
+ by a certain percentage."""
+ return self.created + (percent * self.ttl * 10)
+
+ def getRemainingTTL(self, now):
+ """Returns the remaining TTL in seconds."""
+ return max(0, (self.getExpirationTime(100) - now) / 1000)
+
+ def isExpired(self, now):
+ """Returns true if this record has expired."""
+ return self.getExpirationTime(100) <= now
+
+ def isStale(self, now):
+ """Returns true if this record is at least half way expired."""
+ return self.getExpirationTime(50) <= now
+
+ def resetTTL(self, other):
+ """Sets this record's TTL and created time to that of
+ another record."""
+ self.created = other.created
+ self.ttl = other.ttl
+
+ def write(self, out):
+ """Abstract method"""
+ raise AbstractMethodException
+
+ def toString(self, other):
+        """String representation with additional information"""
+ arg = "%s/%s,%s" % (self.ttl, self.getRemainingTTL(currentTimeMillis()), other)
+ return DNSEntry.toString(self, "record", arg)
+
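# Editor's sketch, not part of the original file: DNSRecord stores
# 'created' in milliseconds and 'ttl' in seconds, so the percent-based
# expiry above works out to created + percent * ttl * 10 milliseconds.
# For the one-hour default TTL:
#
#     created = 0.0                     # hypothetical creation time, ms
#     ttl = 60 * 60                     # _DNS_TTL, seconds
#     assert created + 100 * ttl * 10 == ttl * 1000.0  # isExpired() point
#     assert created + 50 * ttl * 10 == ttl * 500.0    # isStale() point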
+class DNSAddress(DNSRecord):
+ """A DNS address record"""
+
+ def __init__(self, name, type, clazz, ttl, address):
+ DNSRecord.__init__(self, name, type, clazz, ttl)
+ self.address = address
+
+ def write(self, out):
+ """Used in constructing an outgoing packet"""
+ out.writeString(self.address, len(self.address))
+
+ def __eq__(self, other):
+ """Tests equality on address"""
+ if isinstance(other, DNSAddress):
+ return self.address == other.address
+ return 0
+
+ def __repr__(self):
+ """String representation"""
+ try:
+ return socket.inet_ntoa(self.address)
+ except Exception:
+ return self.address
+
+class DNSHinfo(DNSRecord):
+ """A DNS host information record"""
+
+ def __init__(self, name, type, clazz, ttl, cpu, os):
+ DNSRecord.__init__(self, name, type, clazz, ttl)
+ self.cpu = cpu
+ self.os = os
+
+ def write(self, out):
+ """Used in constructing an outgoing packet"""
+ out.writeString(self.cpu, len(self.cpu))
+ out.writeString(self.os, len(self.os))
+
+ def __eq__(self, other):
+ """Tests equality on cpu and os"""
+ if isinstance(other, DNSHinfo):
+ return self.cpu == other.cpu and self.os == other.os
+ return 0
+
+ def __repr__(self):
+ """String representation"""
+ return self.cpu + " " + self.os
+
+class DNSPointer(DNSRecord):
+ """A DNS pointer record"""
+
+ def __init__(self, name, type, clazz, ttl, alias):
+ DNSRecord.__init__(self, name, type, clazz, ttl)
+ self.alias = alias
+
+ def write(self, out):
+ """Used in constructing an outgoing packet"""
+ out.writeName(self.alias)
+
+ def __eq__(self, other):
+ """Tests equality on alias"""
+ if isinstance(other, DNSPointer):
+ return self.alias == other.alias
+ return 0
+
+ def __repr__(self):
+ """String representation"""
+ return self.toString(self.alias)
+
+class DNSText(DNSRecord):
+ """A DNS text record"""
+
+ def __init__(self, name, type, clazz, ttl, text):
+ DNSRecord.__init__(self, name, type, clazz, ttl)
+ self.text = text
+
+ def write(self, out):
+ """Used in constructing an outgoing packet"""
+ out.writeString(self.text, len(self.text))
+
+ def __eq__(self, other):
+ """Tests equality on text"""
+ if isinstance(other, DNSText):
+ return self.text == other.text
+ return 0
+
+ def __repr__(self):
+ """String representation"""
+ if len(self.text) > 10:
+ return self.toString(self.text[:7] + "...")
+ else:
+ return self.toString(self.text)
+
+class DNSService(DNSRecord):
+ """A DNS service record"""
+
+ def __init__(self, name, type, clazz, ttl, priority, weight, port, server):
+ DNSRecord.__init__(self, name, type, clazz, ttl)
+ self.priority = priority
+ self.weight = weight
+ self.port = port
+ self.server = server
+
+ def write(self, out):
+ """Used in constructing an outgoing packet"""
+ out.writeShort(self.priority)
+ out.writeShort(self.weight)
+ out.writeShort(self.port)
+ out.writeName(self.server)
+
+ def __eq__(self, other):
+ """Tests equality on priority, weight, port and server"""
+ if isinstance(other, DNSService):
+ return self.priority == other.priority and self.weight == other.weight and self.port == other.port and self.server == other.server
+ return 0
+
+ def __repr__(self):
+ """String representation"""
+ return self.toString("%s:%s" % (self.server, self.port))
+
+class DNSIncoming(object):
+ """Object representation of an incoming DNS packet"""
+
+ def __init__(self, data):
+ """Constructor from string holding bytes of packet"""
+ self.offset = 0
+ self.data = data
+ self.questions = []
+ self.answers = []
+ self.numQuestions = 0
+ self.numAnswers = 0
+ self.numAuthorities = 0
+ self.numAdditionals = 0
+
+ self.readHeader()
+ self.readQuestions()
+ self.readOthers()
+
+ def readHeader(self):
+ """Reads header portion of packet"""
+ format = '!HHHHHH'
+ length = struct.calcsize(format)
+ info = struct.unpack(format, self.data[self.offset:self.offset+length])
+ self.offset += length
+
+ self.id = info[0]
+ self.flags = info[1]
+ self.numQuestions = info[2]
+ self.numAnswers = info[3]
+ self.numAuthorities = info[4]
+ self.numAdditionals = info[5]
+
+ def readQuestions(self):
+ """Reads questions section of packet"""
+ format = '!HH'
+ length = struct.calcsize(format)
+ for i in range(0, self.numQuestions):
+ name = self.readName()
+ info = struct.unpack(format, self.data[self.offset:self.offset+length])
+ self.offset += length
+
+ try:
+ question = DNSQuestion(name, info[0], info[1])
+ self.questions.append(question)
+ except NonLocalNameException:
+ pass
+
+ def readInt(self):
+ """Reads an integer from the packet"""
+ format = '!I'
+ length = struct.calcsize(format)
+ info = struct.unpack(format, self.data[self.offset:self.offset+length])
+ self.offset += length
+ return info[0]
+
+ def readCharacterString(self):
+ """Reads a character string from the packet"""
+ length = ord(self.data[self.offset])
+ self.offset += 1
+ return self.readString(length)
+
+ def readString(self, len):
+ """Reads a string of a given length from the packet"""
+ format = '!' + str(len) + 's'
+ length = struct.calcsize(format)
+ info = struct.unpack(format, self.data[self.offset:self.offset+length])
+ self.offset += length
+ return info[0]
+
+ def readUnsignedShort(self):
+ """Reads an unsigned short from the packet"""
+ format = '!H'
+ length = struct.calcsize(format)
+ info = struct.unpack(format, self.data[self.offset:self.offset+length])
+ self.offset += length
+ return info[0]
+
+ def readOthers(self):
+ """Reads the answers, authorities and additionals section of the packet"""
+ format = '!HHiH'
+ length = struct.calcsize(format)
+ n = self.numAnswers + self.numAuthorities + self.numAdditionals
+ for i in range(0, n):
+ domain = self.readName()
+ info = struct.unpack(format, self.data[self.offset:self.offset+length])
+ self.offset += length
+
+ rec = None
+ if info[0] == _TYPE_A:
+ rec = DNSAddress(domain, info[0], info[1], info[2], self.readString(4))
+ elif info[0] == _TYPE_CNAME or info[0] == _TYPE_PTR:
+ rec = DNSPointer(domain, info[0], info[1], info[2], self.readName())
+ elif info[0] == _TYPE_TXT:
+ rec = DNSText(domain, info[0], info[1], info[2], self.readString(info[3]))
+ elif info[0] == _TYPE_SRV:
+ rec = DNSService(domain, info[0], info[1], info[2], self.readUnsignedShort(), self.readUnsignedShort(), self.readUnsignedShort(), self.readName())
+ elif info[0] == _TYPE_HINFO:
+ rec = DNSHinfo(domain, info[0], info[1], info[2], self.readCharacterString(), self.readCharacterString())
+ elif info[0] == _TYPE_AAAA:
+ rec = DNSAddress(domain, info[0], info[1], info[2], self.readString(16))
+ else:
+                # Try to ignore types we don't know about.
+                # Skipping the payload may leave the rest of the
+                # packet unparseable, which can show errors, so the
+                # debugging aids below are kept. Newly encountered
+                # types need to be parsed properly.
+ #
+ #print "UNKNOWN TYPE = " + str(info[0])
+ #raise BadTypeInNameException
+ self.offset += info[3]
+
+ if rec is not None:
+ self.answers.append(rec)
+
+ def isQuery(self):
+ """Returns true if this is a query"""
+ return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_QUERY
+
+ def isResponse(self):
+ """Returns true if this is a response"""
+ return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE
+
+ def readUTF(self, offset, len):
+ """Reads a UTF-8 string of a given length from the packet"""
+ return self.data[offset:offset+len].decode('utf-8')
+
+ def readName(self):
+ """Reads a domain name from the packet"""
+ result = ''
+ off = self.offset
+ next = -1
+ first = off
+
+ while True:
+ len = ord(self.data[off])
+ off += 1
+ if len == 0:
+ break
+ t = len & 0xC0
+ if t == 0x00:
+ result = ''.join((result, self.readUTF(off, len) + '.'))
+ off += len
+ elif t == 0xC0:
+ if next < 0:
+ next = off + 1
+ off = ((len & 0x3F) << 8) | ord(self.data[off])
+ if off >= first:
+ raise BadDomainNameCircular(off)
+ first = off
+ else:
+ raise BadDomainName(off)
+
+ if next >= 0:
+ self.offset = next
+ else:
+ self.offset = off
+
+ return result
+
+
+class DNSOutgoing(object):
+ """Object representation of an outgoing packet"""
+
+ def __init__(self, flags, multicast = 1):
+ self.finished = 0
+ self.id = 0
+ self.multicast = multicast
+ self.flags = flags
+ self.names = {}
+ self.data = []
+ self.size = 12
+
+ self.questions = []
+ self.answers = []
+ self.authorities = []
+ self.additionals = []
+
+ def addQuestion(self, record):
+ """Adds a question"""
+ self.questions.append(record)
+
+ def addAnswer(self, inp, record):
+ """Adds an answer"""
+ if not record.suppressedBy(inp):
+ self.addAnswerAtTime(record, 0)
+
+ def addAnswerAtTime(self, record, now):
+        """Adds an answer if it does not expire by a certain time"""
+ if record is not None:
+ if now == 0 or not record.isExpired(now):
+ self.answers.append((record, now))
+
+ def addAuthorativeAnswer(self, record):
+ """Adds an authoritative answer"""
+ self.authorities.append(record)
+
+ def addAdditionalAnswer(self, record):
+ """Adds an additional answer"""
+ self.additionals.append(record)
+
+ def writeByte(self, value):
+ """Writes a single byte to the packet"""
+ format = '!c'
+ self.data.append(struct.pack(format, chr(value)))
+ self.size += 1
+
+ def insertShort(self, index, value):
+ """Inserts an unsigned short in a certain position in the packet"""
+ format = '!H'
+ self.data.insert(index, struct.pack(format, value))
+ self.size += 2
+
+ def writeShort(self, value):
+ """Writes an unsigned short to the packet"""
+ format = '!H'
+ self.data.append(struct.pack(format, value))
+ self.size += 2
+
+ def writeInt(self, value):
+ """Writes an unsigned integer to the packet"""
+ format = '!I'
+ self.data.append(struct.pack(format, int(value)))
+ self.size += 4
+
+ def writeString(self, value, length):
+ """Writes a string to the packet"""
+ format = '!' + str(length) + 's'
+ self.data.append(struct.pack(format, value))
+ self.size += length
+
+ def writeUTF(self, s):
+ """Writes a UTF-8 string of a given length to the packet"""
+ utfstr = s.encode('utf-8')
+ length = len(utfstr)
+ if length > 64:
+ raise NamePartTooLongException
+ self.writeByte(length)
+ self.writeString(utfstr, length)
+
+ def writeName(self, name):
+ """Writes a domain name to the packet"""
+
+ try:
+ # Find existing instance of this name in packet
+ #
+ index = self.names[name]
+ except KeyError:
+ # No record of this name already, so write it
+ # out as normal, recording the location of the name
+ # for future pointers to it.
+ #
+ self.names[name] = self.size
+ parts = name.split('.')
+ if parts[-1] == '':
+ parts = parts[:-1]
+ for part in parts:
+ self.writeUTF(part)
+ self.writeByte(0)
+ return
+
+ # An index was found, so write a pointer to it
+ #
+ self.writeByte((index >> 8) | 0xC0)
+ self.writeByte(index)
+
+ def writeQuestion(self, question):
+ """Writes a question to the packet"""
+ self.writeName(question.name)
+ self.writeShort(question.type)
+ self.writeShort(question.clazz)
+
+ def writeRecord(self, record, now):
+ """Writes a record (answer, authoritative answer, additional) to
+ the packet"""
+ self.writeName(record.name)
+ self.writeShort(record.type)
+ if record.unique and self.multicast:
+ self.writeShort(record.clazz | _CLASS_UNIQUE)
+ else:
+ self.writeShort(record.clazz)
+ if now == 0:
+ self.writeInt(record.ttl)
+ else:
+ self.writeInt(record.getRemainingTTL(now))
+ index = len(self.data)
+ # Adjust size for the short we will write before this record
+ #
+ self.size += 2
+ record.write(self)
+ self.size -= 2
+
+ length = len(''.join(self.data[index:]))
+ self.insertShort(index, length) # Here is the short we adjusted for
+
+ def packet(self):
+ """Returns a string containing the packet's bytes
+
+ No further parts should be added to the packet once this
+ is done."""
+ if not self.finished:
+ self.finished = 1
+ for question in self.questions:
+ self.writeQuestion(question)
+ for answer, time in self.answers:
+ self.writeRecord(answer, time)
+ for authority in self.authorities:
+ self.writeRecord(authority, 0)
+ for additional in self.additionals:
+ self.writeRecord(additional, 0)
+
+ self.insertShort(0, len(self.additionals))
+ self.insertShort(0, len(self.authorities))
+ self.insertShort(0, len(self.answers))
+ self.insertShort(0, len(self.questions))
+ self.insertShort(0, self.flags)
+ if self.multicast:
+ self.insertShort(0, 0)
+ else:
+ self.insertShort(0, self.id)
+ return ''.join(self.data)
+
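+# For reference (a sketch of the standard DNS header, not extra code):
+# the insertShort(0, ...) calls in packet() prepend the twelve header
+# bytes in reverse order, so the finished packet starts with
+#
+#   id | flags | #questions | #answers | #authorities | #additionals
+#
+# each a big-endian unsigned short ('!H').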
+
+class DNSCache(object):
+ """A cache of DNS entries"""
+
+ def __init__(self):
+ self.cache = {}
+
+ def add(self, entry):
+ """Adds an entry"""
+ try:
+ list = self.cache[entry.key]
+ except KeyError:
+ list = self.cache[entry.key] = []
+ list.append(entry)
+
+ def remove(self, entry):
+ """Removes an entry"""
+ try:
+ list = self.cache[entry.key]
+ list.remove(entry)
+ except KeyError:
+ pass
+
+ def get(self, entry):
+ """Gets an entry by key. Will return None if there is no
+ matching entry."""
+ try:
+ list = self.cache[entry.key]
+ return list[list.index(entry)]
+ except (KeyError, ValueError):
+ return None
+
+ def getByDetails(self, name, type, clazz):
+ """Gets an entry by details. Will return None if there is
+ no matching entry."""
+ entry = DNSEntry(name, type, clazz)
+ return self.get(entry)
+
+ def entriesWithName(self, name):
+ """Returns a list of entries whose key matches the name."""
+ try:
+ return self.cache[name]
+ except KeyError:
+ return []
+
+ def entries(self):
+ """Returns a list of all entries"""
+ def add(x, y): return x+y
+ try:
+ return reduce(add, self.cache.values())
+ except Exception:
+ return []
+
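+# Usage sketch (hypothetical record values):
+#
+#   cache = DNSCache()
+#   cache.add(record)                  # any cached DNS record
+#   hit = cache.getByDetails('host.local.', _TYPE_A, _CLASS_IN)
+#   if hit is not None and hit.isExpired(currentTimeMillis()):
+#       cache.remove(hit)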
+
+class Engine(threading.Thread):
+ """An engine wraps read access to sockets, allowing objects that
+ need to receive data from sockets to be called back when the
+ sockets are ready.
+
+ A reader needs a handle_read() method, which is called when the socket
+ it is interested in is ready for reading.
+
+ Writers are not implemented here, because we only send short
+ packets.
+ """
+
+ def __init__(self, zeroconf):
+ threading.Thread.__init__(self)
+ self.zeroconf = zeroconf
+ self.readers = {} # maps socket to reader
+ self.timeout = 5
+ self.condition = threading.Condition()
+ self.start()
+
+ def run(self):
+ while not globals()['_GLOBAL_DONE']:
+ rs = self.getReaders()
+ if len(rs) == 0:
+ # No sockets to manage, but we wait for the timeout
+ # or addition of a socket
+ #
+ self.condition.acquire()
+ self.condition.wait(self.timeout)
+ self.condition.release()
+ else:
+ try:
+ rr, wr, er = select.select(rs, [], [], self.timeout)
+ for socket in rr:
+ try:
+ self.readers[socket].handle_read()
+ except Exception:
+ if not globals()['_GLOBAL_DONE']:
+ traceback.print_exc()
+ except Exception:
+ pass
+
+ def getReaders(self):
+ self.condition.acquire()
+ result = self.readers.keys()
+ self.condition.release()
+ return result
+
+ def addReader(self, reader, socket):
+ self.condition.acquire()
+ self.readers[socket] = reader
+ self.condition.notify()
+ self.condition.release()
+
+ def delReader(self, socket):
+ self.condition.acquire()
+ del(self.readers[socket])
+ self.condition.notify()
+ self.condition.release()
+
+ def notify(self):
+ self.condition.acquire()
+ self.condition.notify()
+ self.condition.release()
+
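+# Registration sketch (illustrative): any object with a handle_read()
+# method can be driven by the engine, e.g.
+#
+#   engine.addReader(reader, sock)   # call back reader.handle_read()
+#   engine.delReader(sock)           # stop watching sock
+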
+class Listener(object):
+ """A Listener is used by this module to listen on the multicast
+ group to which DNS messages are sent, allowing the implementation
+ to cache information as it arrives.
+
+ It requires registration with an Engine object in order to have
+    the handle_read() method called when a socket is available for reading."""
+
+ def __init__(self, zeroconf):
+ self.zeroconf = zeroconf
+ self.zeroconf.engine.addReader(self, self.zeroconf.socket)
+
+ def handle_read(self):
+ data, (addr, port) = self.zeroconf.socket.recvfrom(_MAX_MSG_ABSOLUTE)
+ self.data = data
+ msg = DNSIncoming(data)
+ if msg.isQuery():
+ # Always multicast responses
+ #
+ if port == _MDNS_PORT:
+ self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
+ # If it's not a multicast query, reply via unicast
+ # and multicast
+ #
+ elif port == _DNS_PORT:
+ self.zeroconf.handleQuery(msg, addr, port)
+ self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
+ else:
+ self.zeroconf.handleResponse(msg)
+
+
+class Reaper(threading.Thread):
+ """A Reaper is used by this module to remove cache entries that
+ have expired."""
+
+ def __init__(self, zeroconf):
+ threading.Thread.__init__(self)
+ self.zeroconf = zeroconf
+ self.start()
+
+ def run(self):
+ while True:
+ self.zeroconf.wait(10 * 1000)
+ if globals()['_GLOBAL_DONE']:
+ return
+ now = currentTimeMillis()
+ for record in self.zeroconf.cache.entries():
+ if record.isExpired(now):
+ self.zeroconf.updateRecord(now, record)
+ self.zeroconf.cache.remove(record)
+
+
+class ServiceBrowser(threading.Thread):
+ """Used to browse for a service of a specific type.
+
+ The listener object will have its addService() and
+ removeService() methods called when this browser
+    discovers changes in the services' availability."""
+
+ def __init__(self, zeroconf, type, listener):
+ """Creates a browser for a specific type"""
+ threading.Thread.__init__(self)
+ self.zeroconf = zeroconf
+ self.type = type
+ self.listener = listener
+ self.services = {}
+ self.nextTime = currentTimeMillis()
+ self.delay = _BROWSER_TIME
+ self.list = []
+
+ self.done = 0
+
+ self.zeroconf.addListener(self, DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
+ self.start()
+
+ def updateRecord(self, zeroconf, now, record):
+ """Callback invoked by Zeroconf when new information arrives.
+
+ Updates information required by browser in the Zeroconf cache."""
+ if record.type == _TYPE_PTR and record.name == self.type:
+ expired = record.isExpired(now)
+ try:
+ oldrecord = self.services[record.alias.lower()]
+ if not expired:
+ oldrecord.resetTTL(record)
+ else:
+ del(self.services[record.alias.lower()])
+ callback = lambda x: self.listener.removeService(x, self.type, record.alias)
+ self.list.append(callback)
+ return
+ except Exception:
+ if not expired:
+ self.services[record.alias.lower()] = record
+ callback = lambda x: self.listener.addService(x, self.type, record.alias)
+ self.list.append(callback)
+
+ expires = record.getExpirationTime(75)
+ if expires < self.nextTime:
+ self.nextTime = expires
+
+ def cancel(self):
+ self.done = 1
+ self.zeroconf.notifyAll()
+
+ def run(self):
+ while True:
+ event = None
+ now = currentTimeMillis()
+ if len(self.list) == 0 and self.nextTime > now:
+ self.zeroconf.wait(self.nextTime - now)
+ if globals()['_GLOBAL_DONE'] or self.done:
+ return
+ now = currentTimeMillis()
+
+ if self.nextTime <= now:
+ out = DNSOutgoing(_FLAGS_QR_QUERY)
+ out.addQuestion(DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
+ for record in self.services.values():
+ if not record.isExpired(now):
+ out.addAnswerAtTime(record, now)
+ self.zeroconf.send(out)
+ self.nextTime = now + self.delay
+ self.delay = min(20 * 1000, self.delay * 2)
+
+ if len(self.list) > 0:
+ event = self.list.pop(0)
+
+ if event is not None:
+ event(self.zeroconf)
+
+
+class ServiceInfo(object):
+ """Service information"""
+
+ def __init__(self, type, name, address=None, port=None, weight=0, priority=0, properties=None, server=None):
+ """Create a service description.
+
+ type: fully qualified service type name
+ name: fully qualified service name
+        address: IP address as a packed four-byte string, network
+            byte order (as returned by socket.inet_aton)
+ port: port that the service runs on
+ weight: weight of the service
+ priority: priority of the service
+ properties: dictionary of properties (or a string holding the bytes for the text field)
+ server: fully qualified name for service host (defaults to name)"""
+
+ if not name.endswith(type):
+ raise BadTypeInNameException
+ self.type = type
+ self.name = name
+ self.address = address
+ self.port = port
+ self.weight = weight
+ self.priority = priority
+ if server:
+ self.server = server
+ else:
+ self.server = name
+ self.setProperties(properties)
+
+ def setProperties(self, properties):
+ """Sets properties and text of this info from a dictionary"""
+ if isinstance(properties, dict):
+ self.properties = properties
+ list = []
+ result = ''
+ for key in properties:
+ value = properties[key]
+ if value is None:
+ suffix = ''
+ elif isinstance(value, str):
+ suffix = value
+ elif isinstance(value, int):
+ if value:
+ suffix = 'true'
+ else:
+ suffix = 'false'
+ else:
+ suffix = ''
+ list.append('='.join((key, suffix)))
+ for item in list:
+ result = ''.join((result, struct.pack('!c', chr(len(item))), item))
+ self.text = result
+ else:
+ self.text = properties
+
+ def setText(self, text):
+ """Sets properties and text given a text field"""
+ self.text = text
+ try:
+ result = {}
+ end = len(text)
+ index = 0
+ strs = []
+ while index < end:
+ length = ord(text[index])
+ index += 1
+ strs.append(text[index:index+length])
+ index += length
+
+ for s in strs:
+ eindex = s.find('=')
+ if eindex == -1:
+ # No equals sign at all
+ key = s
+ value = 0
+ else:
+ key = s[:eindex]
+ value = s[eindex+1:]
+ if value == 'true':
+ value = 1
+ elif value == 'false' or not value:
+ value = 0
+
+ # Only update non-existent properties
+                if key and result.get(key) is None:
+ result[key] = value
+
+ self.properties = result
+ except Exception:
+ traceback.print_exc()
+ self.properties = None
+
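+    # Example TXT payload (illustrative values): the wire format parsed
+    # above is a sequence of length-prefixed 'key=value' strings, e.g.
+    #
+    #   '\x08path=/hg\x0cversion=0.10'
+    #
+    # which setText() decodes to {'path': '/hg', 'version': '0.10'}.
+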
+ def getType(self):
+ """Type accessor"""
+ return self.type
+
+ def getName(self):
+ """Name accessor"""
+ if self.type is not None and self.name.endswith("." + self.type):
+ return self.name[:len(self.name) - len(self.type) - 1]
+ return self.name
+
+ def getAddress(self):
+ """Address accessor"""
+ return self.address
+
+ def getPort(self):
+ """Port accessor"""
+ return self.port
+
+ def getPriority(self):
+ """Pirority accessor"""
+ return self.priority
+
+ def getWeight(self):
+ """Weight accessor"""
+ return self.weight
+
+ def getProperties(self):
+ """Properties accessor"""
+ return self.properties
+
+ def getText(self):
+ """Text accessor"""
+ return self.text
+
+ def getServer(self):
+ """Server accessor"""
+ return self.server
+
+ def updateRecord(self, zeroconf, now, record):
+ """Updates service information from a DNS record"""
+ if record is not None and not record.isExpired(now):
+ if record.type == _TYPE_A:
+ #if record.name == self.name:
+ if record.name == self.server:
+ self.address = record.address
+ elif record.type == _TYPE_SRV:
+ if record.name == self.name:
+ self.server = record.server
+ self.port = record.port
+ self.weight = record.weight
+ self.priority = record.priority
+ #self.address = None
+ self.updateRecord(zeroconf, now, zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN))
+ elif record.type == _TYPE_TXT:
+ if record.name == self.name:
+ self.setText(record.text)
+
+ def request(self, zeroconf, timeout):
+ """Returns true if the service could be discovered on the
+ network, and updates this object with details discovered.
+ """
+ now = currentTimeMillis()
+ delay = _LISTENER_TIME
+ next = now + delay
+ last = now + timeout
+ result = 0
+ try:
+ zeroconf.addListener(self, DNSQuestion(self.name, _TYPE_ANY, _CLASS_IN))
+ while self.server is None or self.address is None or self.text is None:
+ if last <= now:
+ return 0
+ if next <= now:
+ out = DNSOutgoing(_FLAGS_QR_QUERY)
+ out.addQuestion(DNSQuestion(self.name, _TYPE_SRV, _CLASS_IN))
+ out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_SRV, _CLASS_IN), now)
+ out.addQuestion(DNSQuestion(self.name, _TYPE_TXT, _CLASS_IN))
+ out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_TXT, _CLASS_IN), now)
+ if self.server is not None:
+ out.addQuestion(DNSQuestion(self.server, _TYPE_A, _CLASS_IN))
+ out.addAnswerAtTime(zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN), now)
+ zeroconf.send(out)
+ next = now + delay
+ delay = delay * 2
+
+ zeroconf.wait(min(next, last) - now)
+ now = currentTimeMillis()
+ result = 1
+ finally:
+ zeroconf.removeListener(self)
+
+ return result
+
+ def __eq__(self, other):
+ """Tests equality of service name"""
+ if isinstance(other, ServiceInfo):
+ return other.name == self.name
+ return 0
+
+ def __ne__(self, other):
+ """Non-equality test"""
+ return not self.__eq__(other)
+
+ def __repr__(self):
+ """String representation"""
+ result = "service[%s,%s:%s," % (self.name, socket.inet_ntoa(self.getAddress()), self.port)
+ if self.text is None:
+ result += "None"
+ else:
+ if len(self.text) < 20:
+ result += self.text
+ else:
+ result += self.text[:17] + "..."
+ result += "]"
+ return result
+
+
+class Zeroconf(object):
+ """Implementation of Zeroconf Multicast DNS Service Discovery
+
+ Supports registration, unregistration, queries and browsing.
+ """
+ def __init__(self, bindaddress=None):
+ """Creates an instance of the Zeroconf class, establishing
+ multicast communications, listening and reaping threads."""
+ globals()['_GLOBAL_DONE'] = 0
+ if bindaddress is None:
+ self.intf = socket.gethostbyname(socket.gethostname())
+ else:
+ self.intf = bindaddress
+ self.group = ('', _MDNS_PORT)
+ self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ try:
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+ except Exception:
+ # SO_REUSEADDR should be equivalent to SO_REUSEPORT for
+ # multicast UDP sockets (p 731, "TCP/IP Illustrated,
+ # Volume 2"), but some BSD-derived systems require
+            # SO_REUSEPORT to be specified explicitly. Also, not all
+ # versions of Python have SO_REUSEPORT available. So
+ # if you're on a BSD-based system, and haven't upgraded
+ # to Python 2.3 yet, you may find this library doesn't
+ # work as expected.
+ #
+ pass
+ self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 255)
+ self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
+ try:
+ self.socket.bind(self.group)
+ except Exception:
+ # Some versions of linux raise an exception even though
+ # the SO_REUSE* options have been set, so ignore it
+ #
+ pass
+ #self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(self.intf) + socket.inet_aton('0.0.0.0'))
+ self.socket.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
+
+ self.listeners = []
+ self.browsers = []
+ self.services = {}
+ self.servicetypes = {}
+
+ self.cache = DNSCache()
+
+ self.condition = threading.Condition()
+
+ self.engine = Engine(self)
+ self.listener = Listener(self)
+ self.reaper = Reaper(self)
+
+ def isLoopback(self):
+ return self.intf.startswith("127.0.0.1")
+
+ def isLinklocal(self):
+ return self.intf.startswith("169.254.")
+
+ def wait(self, timeout):
+ """Calling thread waits for a given number of milliseconds or
+ until notified."""
+ self.condition.acquire()
+ self.condition.wait(timeout/1000)
+ self.condition.release()
+
+ def notifyAll(self):
+ """Notifies all waiting threads"""
+ self.condition.acquire()
+ self.condition.notifyAll()
+ self.condition.release()
+
+ def getServiceInfo(self, type, name, timeout=3000):
+ """Returns network's service information for a particular
+ name and type, or None if no service matches by the timeout,
+ which defaults to 3 seconds."""
+ info = ServiceInfo(type, name)
+ if info.request(self, timeout):
+ return info
+ return None
+
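+    # Usage sketch (hypothetical names):
+    #
+    #   zc = Zeroconf()
+    #   info = zc.getServiceInfo('_hg._tcp.local.',
+    #                            'repo._hg._tcp.local.')
+    #   if info:
+    #       print socket.inet_ntoa(info.getAddress()), info.getPort()
+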
+ def addServiceListener(self, type, listener):
+ """Adds a listener for a particular service type. This object
+ will then have its updateRecord method called when information
+ arrives for that type."""
+ self.removeServiceListener(listener)
+ self.browsers.append(ServiceBrowser(self, type, listener))
+
+ def removeServiceListener(self, listener):
+ """Removes a listener from the set that is currently listening."""
+ for browser in self.browsers:
+ if browser.listener == listener:
+ browser.cancel()
+ del(browser)
+
+ def registerService(self, info, ttl=_DNS_TTL):
+ """Registers service information to the network with a default TTL
+ of 60 seconds. Zeroconf will then respond to requests for
+ information for that service. The name of the service may be
+ changed if needed to make it unique on the network."""
+ self.checkService(info)
+ self.services[info.name.lower()] = info
+        if info.type in self.servicetypes:
+            self.servicetypes[info.type] += 1
+        else:
+            self.servicetypes[info.type] = 1
+ now = currentTimeMillis()
+ nextTime = now
+ i = 0
+ while i < 3:
+ if now < nextTime:
+ self.wait(nextTime - now)
+ now = currentTimeMillis()
+ continue
+ out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
+ out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, ttl, info.name), 0)
+ out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, ttl, info.priority, info.weight, info.port, info.server), 0)
+ out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, ttl, info.text), 0)
+ if info.address:
+ out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, ttl, info.address), 0)
+ self.send(out)
+ i += 1
+ nextTime += _REGISTER_TIME
+
+ def unregisterService(self, info):
+ """Unregister a service."""
+ try:
+ del(self.services[info.name.lower()])
+            if self.servicetypes[info.type] > 1:
+                self.servicetypes[info.type] -= 1
+ else:
+ del self.servicetypes[info.type]
+ except KeyError:
+ pass
+ now = currentTimeMillis()
+ nextTime = now
+ i = 0
+ while i < 3:
+ if now < nextTime:
+ self.wait(nextTime - now)
+ now = currentTimeMillis()
+ continue
+ out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
+ out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
+ out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.name), 0)
+ out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
+ if info.address:
+ out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, info.address), 0)
+ self.send(out)
+ i += 1
+ nextTime += _UNREGISTER_TIME
+
+ def unregisterAllServices(self):
+ """Unregister all registered services."""
+ if len(self.services) > 0:
+ now = currentTimeMillis()
+ nextTime = now
+ i = 0
+ while i < 3:
+ if now < nextTime:
+ self.wait(nextTime - now)
+ now = currentTimeMillis()
+ continue
+ out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
+ for info in self.services.values():
+ out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
+ out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.server), 0)
+ out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
+ if info.address:
+ out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, info.address), 0)
+ self.send(out)
+ i += 1
+ nextTime += _UNREGISTER_TIME
+
+ def checkService(self, info):
+ """Checks the network for a unique service name, modifying the
+ ServiceInfo passed in if it is not unique."""
+ now = currentTimeMillis()
+ nextTime = now
+ i = 0
+ while i < 3:
+ for record in self.cache.entriesWithName(info.type):
+ if record.type == _TYPE_PTR and not record.isExpired(now) and record.alias == info.name:
+                    if info.name.find('.') < 0:
+                        # rename to something likely to be unique;
+                        # address is a packed string and port an int,
+                        # so they must be formatted explicitly
+                        info.name = "%s.[%s:%s].%s" % (
+                            info.name, socket.inet_ntoa(info.address),
+                            info.port, info.type)
+ self.checkService(info)
+ return
+ raise NonUniqueNameException
+ if now < nextTime:
+ self.wait(nextTime - now)
+ now = currentTimeMillis()
+ continue
+ out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
+ self.debug = out
+ out.addQuestion(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN))
+ out.addAuthorativeAnswer(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, info.name))
+ self.send(out)
+ i += 1
+ nextTime += _CHECK_TIME
+
+ def addListener(self, listener, question):
+ """Adds a listener for a given question. The listener will have
+ its updateRecord method called when information is available to
+ answer the question."""
+ now = currentTimeMillis()
+ self.listeners.append(listener)
+ if question is not None:
+ for record in self.cache.entriesWithName(question.name):
+ if question.answeredBy(record) and not record.isExpired(now):
+ listener.updateRecord(self, now, record)
+ self.notifyAll()
+
+ def removeListener(self, listener):
+ """Removes a listener."""
+ try:
+ self.listeners.remove(listener)
+ self.notifyAll()
+ except Exception:
+ pass
+
+ def updateRecord(self, now, rec):
+ """Used to notify listeners of new information that has updated
+ a record."""
+ for listener in self.listeners:
+ listener.updateRecord(self, now, rec)
+ self.notifyAll()
+
+ def handleResponse(self, msg):
+ """Deal with incoming response packets. All answers
+ are held in the cache, and listeners are notified."""
+ now = currentTimeMillis()
+ for record in msg.answers:
+ expired = record.isExpired(now)
+ if record in self.cache.entries():
+ if expired:
+ self.cache.remove(record)
+ else:
+ entry = self.cache.get(record)
+ if entry is not None:
+ entry.resetTTL(record)
+ record = entry
+ else:
+ self.cache.add(record)
+
+ self.updateRecord(now, record)
+
+ def handleQuery(self, msg, addr, port):
+ """Deal with incoming query packets. Provides a response if
+ possible."""
+ out = None
+
+ # Support unicast client responses
+ #
+ if port != _MDNS_PORT:
+ out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, 0)
+ for question in msg.questions:
+ out.addQuestion(question)
+
+ for question in msg.questions:
+ if question.type == _TYPE_PTR:
+ if question.name == "_services._dns-sd._udp.local.":
+ for stype in self.servicetypes.keys():
+ if out is None:
+ out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
+ out.addAnswer(msg, DNSPointer("_services._dns-sd._udp.local.", _TYPE_PTR, _CLASS_IN, _DNS_TTL, stype))
+ for service in self.services.values():
+ if question.name == service.type:
+ if out is None:
+ out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
+ out.addAnswer(msg, DNSPointer(service.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, service.name))
+ else:
+ try:
+ if out is None:
+ out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
+
+ # Answer A record queries for any service addresses we know
+ if question.type == _TYPE_A or question.type == _TYPE_ANY:
+ for service in self.services.values():
+ if service.server == question.name.lower():
+ out.addAnswer(msg, DNSAddress(question.name, _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address))
+
+ service = self.services.get(question.name.lower(), None)
+                    if not service:
+                        continue
+
+ if question.type == _TYPE_SRV or question.type == _TYPE_ANY:
+ out.addAnswer(msg, DNSService(question.name, _TYPE_SRV, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.priority, service.weight, service.port, service.server))
+ if question.type == _TYPE_TXT or question.type == _TYPE_ANY:
+ out.addAnswer(msg, DNSText(question.name, _TYPE_TXT, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.text))
+ if question.type == _TYPE_SRV:
+ out.addAdditionalAnswer(DNSAddress(service.server, _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address))
+ except Exception:
+ traceback.print_exc()
+
+ if out is not None and out.answers:
+ out.id = msg.id
+ self.send(out, addr, port)
+
+ def send(self, out, addr = _MDNS_ADDR, port = _MDNS_PORT):
+ """Sends an outgoing packet."""
+ # This is a quick test to see if we can parse the packets we generate
+ #temp = DNSIncoming(out.packet())
+ try:
+ self.socket.sendto(out.packet(), 0, (addr, port))
+ except Exception:
+ # Ignore this, it may be a temporary loss of network connection
+ pass
+
+ def close(self):
+ """Ends the background threads, and prevent this instance from
+ servicing further queries."""
+ if globals()['_GLOBAL_DONE'] == 0:
+ globals()['_GLOBAL_DONE'] = 1
+ self.notifyAll()
+ self.engine.notify()
+ self.unregisterAllServices()
+ self.socket.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
+ self.socket.close()
+
+# Test a few module features, including service registration, service
+# query (for Zoe), and service unregistration.
+
+if __name__ == '__main__':
+ print "Multicast DNS Service Discovery for Python, version", __version__
+ r = Zeroconf()
+ print "1. Testing registration of a service..."
+ desc = {'version':'0.10','a':'test value', 'b':'another value'}
+ info = ServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local.", socket.inet_aton("127.0.0.1"), 1234, 0, 0, desc)
+ print " Registering service..."
+ r.registerService(info)
+ print " Registration done."
+ print "2. Testing query of service information..."
+ print " Getting ZOE service:", str(r.getServiceInfo("_http._tcp.local.", "ZOE._http._tcp.local."))
+ print " Query done."
+ print "3. Testing query of own service..."
+ print " Getting self:", str(r.getServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local."))
+ print " Query done."
+ print "4. Testing unregister of service information..."
+ r.unregisterService(info)
+ print " Unregister done."
+ r.close()
+
+# no-check-code
diff --git a/hgext/zeroconf/__init__.py b/hgext/zeroconf/__init__.py
new file mode 100644
index 0000000..52ceffa
--- /dev/null
+++ b/hgext/zeroconf/__init__.py
@@ -0,0 +1,188 @@
+# zeroconf.py - zeroconf support for Mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''discover and advertise repositories on the local network
+
+Zeroconf-enabled repositories will be announced in a network without
+the need to configure a server or a service. They can be discovered
+without knowing their actual IP address.
+
+To allow other people to discover your repository, run
+:hg:`serve` in your repository::
+
+ $ cd test
+ $ hg serve
+
+You can discover Zeroconf-enabled repositories by running
+:hg:`paths`::
+
+ $ hg paths
+ zc-test = http://example.com:8000/test
+'''
+
+import socket, time, os
+
+import Zeroconf
+from mercurial import ui, hg, encoding, util, dispatch
+from mercurial import extensions
+from mercurial.hgweb import hgweb_mod
+from mercurial.hgweb import hgwebdir_mod
+
+testedwith = 'internal'
+
+# publish
+
+server = None
+localip = None
+
+def getip():
+ # finds external-facing interface without sending any packets (Linux)
+ try:
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ s.connect(('1.0.0.1', 0))
+ ip = s.getsockname()[0]
+ return ip
+ except socket.error:
+ pass
+
+ # Generic method, sometimes gives useless results
+ try:
+ dumbip = socket.gethostbyaddr(socket.gethostname())[2][0]
+ if not dumbip.startswith('127.') and ':' not in dumbip:
+ return dumbip
+ except (socket.gaierror, socket.herror):
+ dumbip = '127.0.0.1'
+
+ # works elsewhere, but actually sends a packet
+ try:
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ s.connect(('1.0.0.1', 1))
+ ip = s.getsockname()[0]
+ return ip
+ except socket.error:
+ pass
+
+ return dumbip
+
+def publish(name, desc, path, port):
+ global server, localip
+ if not server:
+ ip = getip()
+ if ip.startswith('127.'):
+ # if we have no internet connection, this can happen.
+ return
+ localip = socket.inet_aton(ip)
+ server = Zeroconf.Zeroconf(ip)
+
+ hostname = socket.gethostname().split('.')[0]
+ host = hostname + ".local"
+ name = "%s-%s" % (hostname, name)
+
+ # advertise to browsers
+ svc = Zeroconf.ServiceInfo('_http._tcp.local.',
+ name + '._http._tcp.local.',
+ server = host,
+ port = port,
+ properties = {'description': desc,
+ 'path': "/" + path},
+ address = localip, weight = 0, priority = 0)
+ server.registerService(svc)
+
+ # advertise to Mercurial clients
+ svc = Zeroconf.ServiceInfo('_hg._tcp.local.',
+ name + '._hg._tcp.local.',
+ server = host,
+ port = port,
+ properties = {'description': desc,
+ 'path': "/" + path},
+ address = localip, weight = 0, priority = 0)
+ server.registerService(svc)
+
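+# publish() is called once per served repository; a hypothetical direct
+# call would look like:
+#
+#   publish('test', 'my repository', 'test', 8000)
+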
+class hgwebzc(hgweb_mod.hgweb):
+ def __init__(self, repo, name=None, baseui=None):
+ super(hgwebzc, self).__init__(repo, name=name, baseui=baseui)
+ name = self.reponame or os.path.basename(self.repo.root)
+ path = self.repo.ui.config("web", "prefix", "").strip('/')
+ desc = self.repo.ui.config("web", "description", name)
+ publish(name, desc, path,
+ util.getport(self.repo.ui.config("web", "port", 8000)))
+
+class hgwebdirzc(hgwebdir_mod.hgwebdir):
+ def __init__(self, conf, baseui=None):
+ super(hgwebdirzc, self).__init__(conf, baseui=baseui)
+ prefix = self.ui.config("web", "prefix", "").strip('/') + '/'
+ for repo, path in self.repos:
+ u = self.ui.copy()
+ u.readconfig(os.path.join(path, '.hg', 'hgrc'))
+ name = os.path.basename(repo)
+ path = (prefix + repo).strip('/')
+ desc = u.config('web', 'description', name)
+ publish(name, desc, path,
+ util.getport(u.config("web", "port", 8000)))
+
+# listen
+
+class listener(object):
+ def __init__(self):
+ self.found = {}
+ def removeService(self, server, type, name):
+ if repr(name) in self.found:
+ del self.found[repr(name)]
+ def addService(self, server, type, name):
+ self.found[repr(name)] = server.getServiceInfo(type, name)
+
+def getzcpaths():
+ ip = getip()
+ if ip.startswith('127.'):
+ return
+ server = Zeroconf.Zeroconf(ip)
+ l = listener()
+ Zeroconf.ServiceBrowser(server, "_hg._tcp.local.", l)
+ time.sleep(1)
+ server.close()
+ for value in l.found.values():
+ name = value.name[:value.name.index('.')]
+ url = "http://%s:%s%s" % (socket.inet_ntoa(value.address), value.port,
+ value.properties.get("path", "/"))
+ yield "zc-" + name, url
+
+def config(orig, self, section, key, default=None, untrusted=False):
+ if section == "paths" and key.startswith("zc-"):
+ for name, path in getzcpaths():
+ if name == key:
+ return path
+ return orig(self, section, key, default, untrusted)
+
+def configitems(orig, self, section, untrusted=False):
+ repos = orig(self, section, untrusted)
+ if section == "paths":
+ repos += getzcpaths()
+ return repos
+
+def defaultdest(orig, source):
+ for name, path in getzcpaths():
+ if path == source:
+ return name.encode(encoding.encoding)
+ return orig(source)
+
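+# How the wrapping below works (sketch): extensions.wrapfunction
+# replaces an attribute so that the wrapper receives the original
+# function as its first argument, e.g. after
+#
+#   extensions.wrapfunction(ui.ui, 'config', config)
+#
+# a call to ui.config(...) runs config(orig, ...) above, which falls
+# through to orig for everything except "paths.zc-*" lookups.
+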
+def cleanupafterdispatch(orig, ui, options, cmd, cmdfunc):
+ try:
+ return orig(ui, options, cmd, cmdfunc)
+ finally:
+ # we need to call close() on the server to notify() the various
+ # threading Conditions and allow the background threads to exit
+ global server
+ if server:
+ server.close()
+
+extensions.wrapfunction(dispatch, '_runcommand', cleanupafterdispatch)
+
+extensions.wrapfunction(ui.ui, 'config', config)
+extensions.wrapfunction(ui.ui, 'configitems', configitems)
+extensions.wrapfunction(hg, 'defaultdest', defaultdest)
+hgweb_mod.hgweb = hgwebzc
+hgwebdir_mod.hgwebdir = hgwebdirzc