Diffstat (limited to 'hgext')
-rw-r--r--  hgext/acl.py | 102
-rw-r--r--  hgext/bugzilla.py | 341
-rw-r--r--  hgext/children.py | 7
-rw-r--r--  hgext/churn.py | 6
-rw-r--r--  hgext/color.py | 24
-rw-r--r--  hgext/convert/__init__.py | 21
-rw-r--r--  hgext/convert/bzr.py | 101
-rw-r--r--  hgext/convert/common.py | 82
-rw-r--r--  hgext/convert/convcmd.py | 38
-rw-r--r--  hgext/convert/cvs.py | 7
-rw-r--r--  hgext/convert/cvsps.py | 60
-rw-r--r--  hgext/convert/darcs.py | 6
-rw-r--r--  hgext/convert/filemap.py | 52
-rw-r--r--  hgext/convert/git.py | 30
-rw-r--r--  hgext/convert/hg.py | 31
-rw-r--r--  hgext/convert/monotone.py | 12
-rw-r--r--  hgext/convert/p4.py | 3
-rw-r--r--  hgext/convert/subversion.py | 262
-rw-r--r--  hgext/convert/transport.py | 10
-rw-r--r--  hgext/eol.py | 30
-rw-r--r--  hgext/extdiff.py | 7
-rw-r--r--  hgext/factotum.py | 120
-rw-r--r--  hgext/fetch.py | 25
-rw-r--r--  hgext/gpg.py | 12
-rw-r--r--  hgext/graphlog.py | 393
-rw-r--r--  hgext/hgcia.py | 10
-rw-r--r--  hgext/hgk.py | 8
-rw-r--r--  hgext/highlight/__init__.py | 7
-rw-r--r--  hgext/histedit.py | 715
-rw-r--r--  hgext/inotify/__init__.py | 8
-rw-r--r--  hgext/inotify/linuxserver.py | 7
-rw-r--r--  hgext/inotify/server.py | 4
-rw-r--r--  hgext/interhg.py | 2
-rw-r--r--  hgext/keyword.py | 141
-rw-r--r--  hgext/largefiles/CONTRIBUTORS | 4
-rw-r--r--  hgext/largefiles/__init__.py | 102
-rw-r--r--  hgext/largefiles/basestore.py | 195
-rw-r--r--  hgext/largefiles/lfcommands.py | 549
-rw-r--r--  hgext/largefiles/lfutil.py | 467
-rw-r--r--  hgext/largefiles/localstore.py | 82
-rw-r--r--  hgext/largefiles/overrides.py | 1080
-rw-r--r--  hgext/largefiles/proto.py | 173
-rw-r--r--  hgext/largefiles/remotestore.py | 110
-rw-r--r--  hgext/largefiles/reposetup.py | 475
-rw-r--r--  hgext/largefiles/uisetup.py | 167
-rw-r--r--  hgext/largefiles/wirestore.py | 37
-rw-r--r--  hgext/mq.py | 704
-rw-r--r--  hgext/notify.py | 171
-rw-r--r--  hgext/pager.py | 53
-rw-r--r--  hgext/patchbomb.py | 197
-rw-r--r--  hgext/progress.py | 55
-rw-r--r--  hgext/purge.py | 8
-rw-r--r--  hgext/rebase.py | 304
-rw-r--r--  hgext/record.py | 101
-rw-r--r--  hgext/relink.py | 9
-rw-r--r--  hgext/schemes.py | 9
-rw-r--r--  hgext/share.py | 39
-rw-r--r--  hgext/transplant.py | 125
-rw-r--r--  hgext/win32mbcs.py | 13
-rw-r--r--  hgext/win32text.py | 4
-rw-r--r--  hgext/zeroconf/Zeroconf.py | 4
-rw-r--r--  hgext/zeroconf/__init__.py | 9
62 files changed, 1414 insertions, 6516 deletions
diff --git a/hgext/acl.py b/hgext/acl.py
index 2bf41aa..a50fa72 100644
--- a/hgext/acl.py
+++ b/hgext/acl.py
@@ -32,7 +32,7 @@ The order in which access checks are performed is:
The allow and deny sections take key-value pairs.
Branch-based Access Control
----------------------------
+...........................
Use the ``acl.deny.branches`` and ``acl.allow.branches`` sections to
have branch-based access control. Keys in these sections can be
@@ -46,11 +46,8 @@ The corresponding values can be either:
- a comma-separated list containing users and groups, or
- an asterisk, to match anyone;
-You can add the "!" prefix to a user or group name to invert the sense
-of the match.
-
Path-based Access Control
--------------------------
+.........................
Use the ``acl.deny`` and ``acl.allow`` sections to have path-based
access control. Keys in these sections accept a subtree pattern (with
@@ -58,7 +55,7 @@ a glob syntax by default). The corresponding values follow the same
syntax as the other sections above.
Groups
-------
+......
Group names must be prefixed with an ``@`` symbol. Specifying a group
name has the same effect as specifying all the users in that group.
@@ -69,7 +66,7 @@ a Unix-like system, the list of users will be taken from the OS.
Otherwise, an exception will be raised.
Example Configuration
----------------------
+.....................
::
@@ -142,61 +139,19 @@ Example Configuration
# under the "images" folder:
images/** = jack, @designers
- # Everyone (except for "user6" and "@hg-denied" - see acl.deny above)
- # will have write access to any file under the "resources" folder
- # (except for 1 file. See acl.deny):
+ # Everyone (except for "user6" - see acl.deny above) will have write
+ # access to any file under the "resources" folder (except for 1
+ # file. See acl.deny):
src/main/resources/** = *
.hgtags = release_engineer
-Examples using the "!" prefix
-.............................
-
-Suppose there's a branch that only a given user (or group) should be able to
-push to, and you don't want to restrict access to any other branch that may
-be created.
-
-The "!" prefix allows you to prevent anyone except a given user or group to
-push changesets in a given branch or path.
-
-In the examples below, we will:
-1) Deny access to branch "ring" to anyone but user "gollum"
-2) Deny access to branch "lake" to anyone but members of the group "hobbit"
-3) Deny access to a file to anyone but user "gollum"
-
-::
-
- [acl.allow.branches]
- # Empty
-
- [acl.deny.branches]
-
- # 1) only 'gollum' can commit to branch 'ring';
- # 'gollum' and anyone else can still commit to any other branch.
- ring = !gollum
-
- # 2) only members of the group 'hobbit' can commit to branch 'lake';
- # 'hobbit' members and anyone else can still commit to any other branch.
- lake = !@hobbit
-
- # You can also deny access based on file paths:
-
- [acl.allow]
- # Empty
-
- [acl.deny]
- # 3) only 'gollum' can change the file below;
- # 'gollum' and anyone else can still change any other file.
- /misty/mountains/cave/ring = !gollum
-
'''
from mercurial.i18n import _
from mercurial import util, match
import getpass, urllib
-testedwith = 'internal'
-
def _getusers(ui, group):
# First, try to use group definition from section [acl.groups]
@@ -217,21 +172,7 @@ def _usermatch(ui, user, usersorgroups):
return True
for ug in usersorgroups.replace(',', ' ').split():
-
- if ug.startswith('!'):
- # Test for excluded user or group. Format:
- # if ug is a user name: !username
- # if ug is a group name: !@groupname
- ug = ug[1:]
- if not ug.startswith('@') and user != ug \
- or ug.startswith('@') and user not in _getusers(ui, ug[1:]):
- return True
-
- # Test for user or group. Format:
- # if ug is a user name: username
- # if ug is a group name: @groupname
- elif user == ug \
- or ug.startswith('@') and user in _getusers(ui, ug[1:]):
+ if user == ug or ug.find('@') == 0 and user in _getusers(ui, ug[1:]):
return True
return False
@@ -247,20 +188,15 @@ def buildmatch(ui, repo, user, key):
ui.debug('acl: %s enabled, %d entries for user %s\n' %
(key, len(pats), user))
- # Branch-based ACL
if not repo:
if pats:
- # If there's an asterisk (meaning "any branch"), always return True;
- # Otherwise, test if b is in pats
- if '*' in pats:
- return util.always
- return lambda b: b in pats
- return util.never
-
- # Path-based ACL
+ return lambda b: '*' in pats or b in pats
+ return lambda b: False
+
if pats:
return match.match(repo.root, '', pats)
- return util.never
+ return match.exact(repo.root, '', [])
+
def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
if hooktype not in ['pretxnchangegroup', 'pretxncommit']:
@@ -280,8 +216,6 @@ def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
if user is None:
user = getpass.getuser()
- ui.debug('acl: checking access for user "%s"\n' % user)
-
cfg = ui.config('acl', 'config')
if cfg:
ui.readconfig(cfg, sections = ['acl.groups', 'acl.allow.branches',
@@ -308,9 +242,9 @@ def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
for f in ctx.files():
if deny and deny(f):
- raise util.Abort(_('acl: user "%s" denied on "%s"'
- ' (changeset "%s")') % (user, f, ctx))
+ ui.debug('acl: user %s denied on %s\n' % (user, f))
+ raise util.Abort(_('acl: access denied for changeset %s') % ctx)
if allow and not allow(f):
- raise util.Abort(_('acl: user "%s" not allowed on "%s"'
- ' (changeset "%s")') % (user, f, ctx))
- ui.debug('acl: path access granted: "%s"\n' % ctx)
+ ui.debug('acl: user %s not allowed on %s\n' % (user, f))
+ raise util.Abort(_('acl: access denied for changeset %s') % ctx)
+ ui.debug('acl: allowing changeset %s\n' % ctx)
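The collapsed ``_usermatch`` above reduces to a single predicate: a plain
token must equal the user name, and an ``@``-prefixed token is resolved
as a group. A minimal standalone sketch of that logic (the names here
are illustrative, not the extension's API)::

    def usermatch(user, usersorgroups, groups):
        # '*' matches anyone
        if usersorgroups == '*':
            return True
        for ug in usersorgroups.replace(',', ' ').split():
            # '@name' is a group lookup; anything else is a literal user
            if ug.startswith('@'):
                if user in groups.get(ug[1:], set()):
                    return True
            elif user == ug:
                return True
        return False

    # usermatch('jack', 'jane, @designers', {'designers': set(['jack'])})
    # -> True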
diff --git a/hgext/bugzilla.py b/hgext/bugzilla.py
index 42eef74..705694c 100644
--- a/hgext/bugzilla.py
+++ b/hgext/bugzilla.py
@@ -1,7 +1,7 @@
# bugzilla.py - bugzilla integration for mercurial
#
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-# Copyright 2011-2 Jim Hague <jim.hague@acm.org>
+# Copyright 2011 Jim Hague <jim.hague@acm.org>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -12,8 +12,7 @@ This hook extension adds comments on bugs in Bugzilla when changesets
that refer to bugs by Bugzilla ID are seen. The comment is formatted using
the Mercurial template mechanism.
-The bug references can optionally include an update for Bugzilla of the
-hours spent working on the bug. Bugs can also be marked fixed.
+The hook does not change bug status.
Three basic modes of access to Bugzilla are provided:
@@ -33,13 +32,13 @@ permission to read Bugzilla configuration details and the necessary
MySQL user and password to have full access rights to the Bugzilla
database. For these reasons this access mode is now considered
deprecated, and will not be updated for new Bugzilla versions going
-forward. Only adding comments is supported in this access mode.
+forward.
Access via XMLRPC needs a Bugzilla username and password to be specified
in the configuration. Comments are added under that username. Since the
configuration must be readable by all Mercurial users, it is recommended
that the rights of that user are restricted in Bugzilla to the minimum
-necessary to add comments. Marking bugs fixed requires Bugzilla 4.0 and later.
+necessary to add comments.
Access via XMLRPC/email uses XMLRPC to query Bugzilla, but sends
email to the Bugzilla email interface to submit comments to bugs.
@@ -47,8 +46,7 @@ The From: address in the email is set to the email address of the Mercurial
user, so the comment appears to come from the Mercurial user. In the event
that the Mercurial user email is not recognised by Bugzilla as a Bugzilla
user, the email associated with the Bugzilla username used to log into
-Bugzilla is used instead as the source of the comment. Marking bugs fixed
-works on all supported Bugzilla versions.
+Bugzilla is used instead as the source of the comment.
Configuration items common to all access modes:
@@ -64,34 +62,11 @@ bugzilla.version
including 2.18.
bugzilla.regexp
- Regular expression to match bug IDs for update in changeset commit message.
- It must contain one "()" named group ``<ids>`` containing the bug
- IDs separated by non-digit characters. It may also contain
- a named group ``<hours>`` with a floating-point number giving the
- hours worked on the bug. If no named groups are present, the first
- "()" group is assumed to contain the bug IDs, and work time is not
- updated. The default expression matches ``Bug 1234``, ``Bug no. 1234``,
- ``Bug number 1234``, ``Bugs 1234,5678``, ``Bug 1234 and 5678`` and
- variations thereof, followed by an hours number prefixed by ``h`` or
- ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
-
-bugzilla.fixregexp
- Regular expression to match bug IDs for marking fixed in changeset
- commit message. This must contain a "()" named group ``<ids>` containing
- the bug IDs separated by non-digit characters. It may also contain
- a named group ``<hours>`` with a floating-point number giving the
- hours worked on the bug. If no named groups are present, the first
- "()" group is assumed to contain the bug IDs, and work time is not
- updated. The default expression matches ``Fixes 1234``, ``Fixes bug 1234``,
- ``Fixes bugs 1234,5678``, ``Fixes 1234 and 5678`` and
- variations thereof, followed by an hours number prefixed by ``h`` or
- ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
-
-bugzilla.fixstatus
- The status to set a bug to when marking fixed. Default ``RESOLVED``.
-
-bugzilla.fixresolution
- The resolution to set a bug to when marking fixed. Default ``FIXED``.
+ Regular expression to match bug IDs in changeset commit message.
+ Must contain one "()" group. The default expression matches ``Bug
+ 1234``, ``Bug no. 1234``, ``Bug number 1234``, ``Bugs 1234,5678``,
+ ``Bug 1234 and 5678`` and variations thereof. Matching is case
+ insensitive.
bugzilla.style
The style file to use when formatting comments.
@@ -280,9 +255,7 @@ All the above add a comment to the Bugzilla bug record of the form::
from mercurial.i18n import _
from mercurial.node import short
from mercurial import cmdutil, mail, templater, util
-import re, time, urlparse, xmlrpclib
-
-testedwith = 'internal'
+import re, time, xmlrpclib
class bzaccess(object):
'''Base class for access to Bugzilla.'''
@@ -301,35 +274,24 @@ class bzaccess(object):
return user
# Methods to be implemented by access classes.
- #
- # 'bugs' is a dict keyed on bug id, where values are a dict holding
- # updates to bug state. Recognised dict keys are:
- #
- # 'hours': Value, float containing work hours to be updated.
- # 'fix': If key present, bug is to be marked fixed. Value ignored.
-
- def filter_real_bug_ids(self, bugs):
- '''remove bug IDs that do not exist in Bugzilla from bugs.'''
+ def filter_real_bug_ids(self, ids):
+ '''remove bug IDs that do not exist in Bugzilla from set.'''
pass
- def filter_cset_known_bug_ids(self, node, bugs):
- '''remove bug IDs where node occurs in comment text from bugs.'''
+ def filter_cset_known_bug_ids(self, node, ids):
+ '''remove bug IDs where node occurs in comment text from set.'''
pass
- def updatebug(self, bugid, newstate, text, committer):
- '''update the specified bug. Add comment text and set new states.
+ def add_comment(self, bugid, text, committer):
+ '''add comment to bug.
If possible add the comment as being from the committer of
the changeset. Otherwise use the default Bugzilla user.
'''
pass
- def notify(self, bugs, committer):
- '''Force sending of Bugzilla notification emails.
-
- Only required if the access method does not trigger notification
- emails automatically.
- '''
+ def notify(self, ids, committer):
+ '''Force sending of Bugzilla notification emails.'''
pass
# Bugzilla via direct access to MySQL database.
@@ -338,7 +300,7 @@ class bzmysql(bzaccess):
The earliest Bugzilla version this is tested with is version 2.16.
- If your Bugzilla is version 3.4 or above, you are strongly
+ If your Bugzilla is version 3.2 or above, you are strongly
recommended to use the XMLRPC access method instead.
'''
@@ -391,35 +353,33 @@ class bzmysql(bzaccess):
raise util.Abort(_('unknown database schema'))
return ids[0][0]
- def filter_real_bug_ids(self, bugs):
- '''filter not-existing bugs from set.'''
+ def filter_real_bug_ids(self, ids):
+ '''filter not-existing bug ids from set.'''
self.run('select bug_id from bugs where bug_id in %s' %
- bzmysql.sql_buglist(bugs.keys()))
- existing = [id for (id,) in self.cursor.fetchall()]
- for id in bugs.keys():
- if id not in existing:
- self.ui.status(_('bug %d does not exist\n') % id)
- del bugs[id]
-
- def filter_cset_known_bug_ids(self, node, bugs):
+ bzmysql.sql_buglist(ids))
+ return set([c[0] for c in self.cursor.fetchall()])
+
+ def filter_cset_known_bug_ids(self, node, ids):
'''filter bug ids that already refer to this changeset from set.'''
+
self.run('''select bug_id from longdescs where
bug_id in %s and thetext like "%%%s%%"''' %
- (bzmysql.sql_buglist(bugs.keys()), short(node)))
+ (bzmysql.sql_buglist(ids), short(node)))
for (id,) in self.cursor.fetchall():
self.ui.status(_('bug %d already knows about changeset %s\n') %
(id, short(node)))
- del bugs[id]
+ ids.discard(id)
+ return ids
- def notify(self, bugs, committer):
+ def notify(self, ids, committer):
'''tell bugzilla to send mail.'''
+
self.ui.status(_('telling bugzilla to send mail:\n'))
(user, userid) = self.get_bugzilla_user(committer)
- for id in bugs.keys():
+ for id in ids:
self.ui.status(_(' bug %s\n') % id)
cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
- bzdir = self.ui.config('bugzilla', 'bzdir',
- '/var/www/html/bugzilla')
+ bzdir = self.ui.config('bugzilla', 'bzdir', '/var/www/html/bugzilla')
try:
# Backwards-compatible with old notify string, which
# took one string. This will throw with a new format
@@ -471,18 +431,13 @@ class bzmysql(bzaccess):
userid = self.get_user_id(defaultuser)
user = defaultuser
except KeyError:
- raise util.Abort(_('cannot find bugzilla user id for %s or %s')
- % (user, defaultuser))
+ raise util.Abort(_('cannot find bugzilla user id for %s or %s') %
+ (user, defaultuser))
return (user, userid)
- def updatebug(self, bugid, newstate, text, committer):
- '''update bug state with comment text.
-
- Try adding comment as committer of changeset, otherwise as
- default bugzilla user.'''
- if len(newstate) > 0:
- self.ui.warn(_("Bugzilla/MySQL cannot update bug state\n"))
-
+ def add_comment(self, bugid, text, committer):
+ '''add comment to bug. try adding comment as committer of
+ changeset, otherwise as default bugzilla user.'''
(user, userid) = self.get_bugzilla_user(committer)
now = time.strftime('%Y-%m-%d %H:%M:%S')
self.run('''insert into longdescs
@@ -518,16 +473,17 @@ class bzmysql_3_0(bzmysql_2_18):
# Buzgilla via XMLRPC interface.
-class cookietransportrequest(object):
- """A Transport request method that retains cookies over its lifetime.
+class CookieSafeTransport(xmlrpclib.SafeTransport):
+ """A SafeTransport that retains cookies over its lifetime.
The regular xmlrpclib transports ignore cookies. Which causes
a bit of a problem when you need a cookie-based login, as with
the Bugzilla XMLRPC interface.
- So this is a helper for defining a Transport which looks for
- cookies being set in responses and saves them to add to all future
- requests.
+ So this is a SafeTransport which looks for cookies being set
+ in responses and saves them to add to all future requests.
+ It appears a SafeTransport can do both HTTP and HTTPS sessions,
+ which saves us having to do a CookieTransport too.
"""
# Inspiration drawn from
@@ -542,7 +498,6 @@ class cookietransportrequest(object):
def request(self, host, handler, request_body, verbose=0):
self.verbose = verbose
- self.accept_gzip_encoding = False
# issue XML-RPC request
h = self.make_connection(host)
@@ -582,20 +537,6 @@ class cookietransportrequest(object):
return unmarshaller.close()
-# The explicit calls to the underlying xmlrpclib __init__() methods are
-# necessary. The xmlrpclib.Transport classes are old-style classes, and
-# it turns out their __init__() doesn't get called when doing multiple
-# inheritance with a new-style class.
-class cookietransport(cookietransportrequest, xmlrpclib.Transport):
- def __init__(self, use_datetime=0):
- if util.safehasattr(xmlrpclib.Transport, "__init__"):
- xmlrpclib.Transport.__init__(self, use_datetime)
-
-class cookiesafetransport(cookietransportrequest, xmlrpclib.SafeTransport):
- def __init__(self, use_datetime=0):
- if util.safehasattr(xmlrpclib.Transport, "__init__"):
- xmlrpclib.SafeTransport.__init__(self, use_datetime)
-
class bzxmlrpc(bzaccess):
"""Support for access to Bugzilla via the Bugzilla XMLRPC API.
@@ -612,83 +553,41 @@ class bzxmlrpc(bzaccess):
user = self.ui.config('bugzilla', 'user', 'bugs')
passwd = self.ui.config('bugzilla', 'password')
- self.fixstatus = self.ui.config('bugzilla', 'fixstatus', 'RESOLVED')
- self.fixresolution = self.ui.config('bugzilla', 'fixresolution',
- 'FIXED')
-
- self.bzproxy = xmlrpclib.ServerProxy(bzweb, self.transport(bzweb))
- ver = self.bzproxy.Bugzilla.version()['version'].split('.')
- self.bzvermajor = int(ver[0])
- self.bzverminor = int(ver[1])
+ self.bzproxy = xmlrpclib.ServerProxy(bzweb, CookieSafeTransport())
self.bzproxy.User.login(dict(login=user, password=passwd))
- def transport(self, uri):
- if urlparse.urlparse(uri, "http")[0] == "https":
- return cookiesafetransport()
- else:
- return cookietransport()
-
def get_bug_comments(self, id):
"""Return a string with all comment text for a bug."""
- c = self.bzproxy.Bug.comments(dict(ids=[id], include_fields=['text']))
+ c = self.bzproxy.Bug.comments(dict(ids=[id]))
return ''.join([t['text'] for t in c['bugs'][str(id)]['comments']])
- def filter_real_bug_ids(self, bugs):
- probe = self.bzproxy.Bug.get(dict(ids=sorted(bugs.keys()),
- include_fields=[],
- permissive=True))
- for badbug in probe['faults']:
- id = badbug['id']
- self.ui.status(_('bug %d does not exist\n') % id)
- del bugs[id]
-
- def filter_cset_known_bug_ids(self, node, bugs):
- for id in sorted(bugs.keys()):
+ def filter_real_bug_ids(self, ids):
+ res = set()
+ bugs = self.bzproxy.Bug.get(dict(ids=sorted(ids), permissive=True))
+ for bug in bugs['bugs']:
+ res.add(bug['id'])
+ return res
+
+ def filter_cset_known_bug_ids(self, node, ids):
+ for id in sorted(ids):
if self.get_bug_comments(id).find(short(node)) != -1:
self.ui.status(_('bug %d already knows about changeset %s\n') %
(id, short(node)))
- del bugs[id]
-
- def updatebug(self, bugid, newstate, text, committer):
- args = {}
- if 'hours' in newstate:
- args['work_time'] = newstate['hours']
-
- if self.bzvermajor >= 4:
- args['ids'] = [bugid]
- args['comment'] = {'body' : text}
- if 'fix' in newstate:
- args['status'] = self.fixstatus
- args['resolution'] = self.fixresolution
- self.bzproxy.Bug.update(args)
- else:
- if 'fix' in newstate:
- self.ui.warn(_("Bugzilla/XMLRPC needs Bugzilla 4.0 or later "
- "to mark bugs fixed\n"))
- args['id'] = bugid
- args['comment'] = text
- self.bzproxy.Bug.add_comment(args)
+ ids.discard(id)
+ return ids
+
+ def add_comment(self, bugid, text, committer):
+ self.bzproxy.Bug.add_comment(dict(id=bugid, comment=text))
class bzxmlrpcemail(bzxmlrpc):
"""Read data from Bugzilla via XMLRPC, send updates via email.
Advantages of sending updates via email:
1. Comments can be added as any user, not just logged in user.
- 2. Bug statuses or other fields not accessible via XMLRPC can
- potentially be updated.
-
- There is no XMLRPC function to change bug status before Bugzilla
- 4.0, so bugs cannot be marked fixed via XMLRPC before Bugzilla 4.0.
- But bugs can be marked fixed via email from 3.4 onwards.
+ 2. Bug statuses and other fields not accessible via XMLRPC can
+ be updated. This is not currently used.
"""
- # The email interface changes subtly between 3.4 and 3.6. In 3.4,
- # in-email fields are specified as '@<fieldname> = <value>'. In
- # 3.6 this becomes '@<fieldname> <value>'. And fieldname @bug_id
- # in 3.4 becomes @id in 3.6. 3.6 and 4.0 both maintain backwards
- # compatibility, but rather than rely on this use the new format for
- # 4.0 onwards.
-
def __init__(self, ui):
bzxmlrpc.__init__(self, ui)
@@ -697,14 +596,6 @@ class bzxmlrpcemail(bzxmlrpc):
raise util.Abort(_("configuration 'bzemail' missing"))
mail.validateconfig(self.ui)
- def makecommandline(self, fieldname, value):
- if self.bzvermajor >= 4:
- return "@%s %s" % (fieldname, str(value))
- else:
- if fieldname == "id":
- fieldname = "bug_id"
- return "@%s = %s" % (fieldname, str(value))
-
def send_bug_modify_email(self, bugid, commands, comment, committer):
'''send modification message to Bugzilla bug via email.
@@ -725,9 +616,8 @@ class bzxmlrpcemail(bzxmlrpc):
raise util.Abort(_("default bugzilla user %s email not found") %
user)
user = matches['users'][0]['email']
- commands.append(self.makecommandline("id", bugid))
- text = "\n".join(commands) + "\n\n" + comment
+ text = "\n".join(commands) + "\n@bug_id = %d\n\n" % bugid + comment
_charsets = mail._charsets(self.ui)
user = mail.addressencode(self.ui, user, _charsets)
@@ -739,14 +629,8 @@ class bzxmlrpcemail(bzxmlrpc):
sendmail = mail.connect(self.ui)
sendmail(user, bzemail, msg.as_string())
- def updatebug(self, bugid, newstate, text, committer):
- cmds = []
- if 'hours' in newstate:
- cmds.append(self.makecommandline("work_time", newstate['hours']))
- if 'fix' in newstate:
- cmds.append(self.makecommandline("bug_status", self.fixstatus))
- cmds.append(self.makecommandline("resolution", self.fixresolution))
- self.send_bug_modify_email(bugid, cmds, text, committer)
+ def add_comment(self, bugid, text, committer):
+ self.send_bug_modify_email(bugid, [], text, committer)
class bugzilla(object):
# supported versions of bugzilla. different versions have
@@ -760,13 +644,7 @@ class bugzilla(object):
}
_default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
- r'(?P<ids>(?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
- r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')
-
- _default_fix_re = (r'fix(?:es)?\s*(?:bugs?\s*)?,?\s*'
- r'(?:nos?\.?|num(?:ber)?s?)?\s*'
- r'(?P<ids>(?:#?\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
- r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')
+ r'((?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)')
_bz = None
@@ -792,76 +670,38 @@ class bugzilla(object):
return getattr(self.bz(), key)
_bug_re = None
- _fix_re = None
_split_re = None
- def find_bugs(self, ctx):
- '''return bugs dictionary created from commit comment.
+ def find_bug_ids(self, ctx):
+ '''return set of integer bug IDs from commit comment.
- Extract bug info from changeset comments. Filter out any that are
+ Extract bug IDs from changeset comments. Filter out any that are
not known to Bugzilla, and any that already have a reference to
the given changeset in their comments.
'''
if bugzilla._bug_re is None:
bugzilla._bug_re = re.compile(
- self.ui.config('bugzilla', 'regexp',
- bugzilla._default_bug_re), re.IGNORECASE)
- bugzilla._fix_re = re.compile(
- self.ui.config('bugzilla', 'fixregexp',
- bugzilla._default_fix_re), re.IGNORECASE)
+ self.ui.config('bugzilla', 'regexp', bugzilla._default_bug_re),
+ re.IGNORECASE)
bugzilla._split_re = re.compile(r'\D+')
start = 0
- hours = 0.0
- bugs = {}
- bugmatch = bugzilla._bug_re.search(ctx.description(), start)
- fixmatch = bugzilla._fix_re.search(ctx.description(), start)
+ ids = set()
while True:
- bugattribs = {}
- if not bugmatch and not fixmatch:
+ m = bugzilla._bug_re.search(ctx.description(), start)
+ if not m:
break
- if not bugmatch:
- m = fixmatch
- elif not fixmatch:
- m = bugmatch
- else:
- if bugmatch.start() < fixmatch.start():
- m = bugmatch
- else:
- m = fixmatch
start = m.end()
- if m is bugmatch:
- bugmatch = bugzilla._bug_re.search(ctx.description(), start)
- if 'fix' in bugattribs:
- del bugattribs['fix']
- else:
- fixmatch = bugzilla._fix_re.search(ctx.description(), start)
- bugattribs['fix'] = None
-
- try:
- ids = m.group('ids')
- except IndexError:
- ids = m.group(1)
- try:
- hours = float(m.group('hours'))
- bugattribs['hours'] = hours
- except IndexError:
- pass
- except TypeError:
- pass
- except ValueError:
- self.ui.status(_("%s: invalid hours\n") % m.group('hours'))
-
- for id in bugzilla._split_re.split(ids):
+ for id in bugzilla._split_re.split(m.group(1)):
if not id:
continue
- bugs[int(id)] = bugattribs
- if bugs:
- self.filter_real_bug_ids(bugs)
- if bugs:
- self.filter_cset_known_bug_ids(ctx.node(), bugs)
- return bugs
-
- def update(self, bugid, newstate, ctx):
+ ids.add(int(id))
+ if ids:
+ ids = self.filter_real_bug_ids(ids)
+ if ids:
+ ids = self.filter_cset_known_bug_ids(ctx.node(), ids)
+ return ids
+
+ def update(self, bugid, ctx):
'''update bugzilla bug with reference to changeset.'''
def webroot(root):
@@ -894,7 +734,7 @@ class bugzilla(object):
root=self.repo.root,
webroot=webroot(self.repo.root))
data = self.ui.popbuffer()
- self.updatebug(bugid, newstate, data, util.email(ctx.user()))
+ self.add_comment(bugid, data, util.email(ctx.user()))
def hook(ui, repo, hooktype, node=None, **kwargs):
'''add comment to bugzilla for each changeset that refers to a
@@ -906,10 +746,11 @@ def hook(ui, repo, hooktype, node=None, **kwargs):
try:
bz = bugzilla(ui, repo)
ctx = repo[node]
- bugs = bz.find_bugs(ctx)
- if bugs:
- for bug in bugs:
- bz.update(bug, bugs[bug], ctx)
- bz.notify(bugs, util.email(ctx.user()))
+ ids = bz.find_bug_ids(ctx)
+ if ids:
+ for id in ids:
+ bz.update(id, ctx)
+ bz.notify(ids, util.email(ctx.user()))
except Exception, e:
raise util.Abort(_('Bugzilla error: %s') % e)
+
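The reverted ``find_bug_ids`` boils down to two regexps: scan the commit
message with the configured ``bugzilla.regexp``, then split the captured
group on runs of non-digits. A minimal sketch using the default pattern
quoted in the hunk above::

    import re

    bug_re = re.compile(r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
                        r'((?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)',
                        re.IGNORECASE)
    split_re = re.compile(r'\D+')

    def find_bug_ids(description):
        # every match contributes one or more numeric IDs
        ids = set()
        for m in bug_re.finditer(description):
            ids.update(int(i) for i in split_re.split(m.group(1)) if i)
        return ids

    # find_bug_ids('fix the frobnicator, bugs 1234 and 5678')
    # -> set([1234, 5678])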
diff --git a/hgext/children.py b/hgext/children.py
index 7b477aa..da2fe9c 100644
--- a/hgext/children.py
+++ b/hgext/children.py
@@ -8,17 +8,12 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
-'''command to display child changesets (DEPRECATED)
-
-This extension is deprecated. You should use :hg:`log -r
-"children(REV)"` instead.
-'''
+'''command to display child changesets'''
from mercurial import cmdutil
from mercurial.commands import templateopts
from mercurial.i18n import _
-testedwith = 'internal'
def children(ui, repo, file_=None, **opts):
"""show the children of the given or working directory revision
diff --git a/hgext/churn.py b/hgext/churn.py
index 29796f0..b4883ef 100644
--- a/hgext/churn.py
+++ b/hgext/churn.py
@@ -13,8 +13,6 @@ from mercurial import patch, cmdutil, scmutil, util, templater, commands
import os
import time, datetime
-testedwith = 'internal'
-
def maketemplater(ui, repo, tmpl):
tmpl = templater.parsestring(tmpl, quoted=False)
try:
@@ -69,7 +67,7 @@ def countrate(ui, repo, amap, *pats, **opts):
else:
parents = ctx.parents()
if len(parents) > 1:
- ui.note(_('revision %d is a merge, ignoring...\n') % (rev,))
+ ui.note(_('Revision %d is a merge, ignoring...\n') % (rev,))
return
ctx1 = parents[0]
@@ -137,7 +135,7 @@ def churn(ui, repo, *pats, **opts):
except ValueError:
l = l.strip()
if l:
- ui.warn(_("skipping malformed alias: %s\n") % l)
+ ui.warn(_("skipping malformed alias: %s\n" % l))
continue
rate = countrate(ui, repo, amap, *pats, **opts).items()
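One churn hunk deserves a closer look: it moves the ``%`` interpolation
inside ``_()``, so gettext is handed an already-formatted string that
can never match a catalog key. The safe ordering translates the literal
first::

    from mercurial.i18n import _

    def warnmalformed(ui, line):
        # translate the literal format string, then interpolate; doing
        # '%' first would make the catalog lookup silently miss
        ui.warn(_("skipping malformed alias: %s\n") % line)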
diff --git a/hgext/color.py b/hgext/color.py
index 22ef360..a4fe16a 100644
--- a/hgext/color.py
+++ b/hgext/color.py
@@ -2,8 +2,19 @@
#
# Copyright (C) 2007 Kevin Christen <kevin.christen@gmail.com>
#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''colorize output from some commands
@@ -57,9 +68,6 @@ Default effects may be overridden from your configuration file::
branches.current = green
branches.inactive = none
- tags.normal = green
- tags.local = black bold
-
The available effects in terminfo mode are 'blink', 'bold', 'dim',
'inverse', 'invisible', 'italic', 'standout', and 'underline'; in
ECMA-48 mode, the options are 'bold', 'inverse', 'italic', and
@@ -105,8 +113,6 @@ import os
from mercurial import commands, dispatch, extensions, ui as uimod, util
from mercurial.i18n import _
-testedwith = 'internal'
-
# start and stop parameters for effects
_effects = {'none': 0, 'black': 30, 'red': 31, 'green': 32, 'yellow': 33,
'blue': 34, 'magenta': 35, 'cyan': 36, 'white': 37, 'bold': 1,
@@ -251,9 +257,7 @@ _styles = {'grep.match': 'red bold',
'status.ignored': 'black bold',
'status.modified': 'blue bold',
'status.removed': 'red bold',
- 'status.unknown': 'magenta bold underline',
- 'tags.normal': 'green',
- 'tags.local': 'black bold'}
+ 'status.unknown': 'magenta bold underline'}
def _effect_str(effect):
diff --git a/hgext/convert/__init__.py b/hgext/convert/__init__.py
index e53c82c..abaa68a 100644
--- a/hgext/convert/__init__.py
+++ b/hgext/convert/__init__.py
@@ -13,8 +13,6 @@ import subversion
from mercurial import commands, templatekw
from mercurial.i18n import _
-testedwith = 'internal'
-
# Commands definition was moved elsewhere to ease demandload job.
def convert(ui, src, dest=None, revmapfile=None, **opts):
@@ -138,7 +136,7 @@ def convert(ui, src, dest=None, revmapfile=None, **opts):
repository from "default" to a named branch.
Mercurial Source
- ################
+ ''''''''''''''''
The Mercurial source recognizes the following configuration
options, which you can set on the command line with ``--config``:
@@ -148,14 +146,14 @@ def convert(ui, src, dest=None, revmapfile=None, **opts):
converting from and to Mercurial. Default is False.
:convert.hg.saverev: store original revision ID in changeset
- (forces target IDs to change). It takes a boolean argument and
- defaults to False.
+ (forces target IDs to change). It takes and boolean argument
+ and defaults to False.
:convert.hg.startrev: convert start revision and its descendants.
It takes a hg revision identifier and defaults to 0.
CVS Source
- ##########
+ ''''''''''
CVS source will use a sandbox (i.e. a checked-out copy) from CVS
to indicate the starting point of what will be converted. Direct
@@ -197,7 +195,7 @@ def convert(ui, src, dest=None, revmapfile=None, **opts):
delete them.
:hook.cvschangesets: Specify a Python function to be called after
- the changesets are calculated from the CVS log. The
+ the changesets are calculated from the the CVS log. The
function is passed a list with the changeset entries, and can
modify the changesets in-place, or add or delete them.
@@ -207,7 +205,7 @@ def convert(ui, src, dest=None, revmapfile=None, **opts):
the command help for more details.
Subversion Source
- #################
+ '''''''''''''''''
Subversion source detects classical trunk/branches/tags layouts.
By default, the supplied ``svn://repo/path/`` source URL is
@@ -239,7 +237,7 @@ def convert(ui, src, dest=None, revmapfile=None, **opts):
The default is 0.
Perforce Source
- ###############
+ '''''''''''''''
The Perforce (P4) importer can be given a p4 depot path or a
client specification as source. It will convert all files in the
@@ -255,7 +253,7 @@ def convert(ui, src, dest=None, revmapfile=None, **opts):
Perforce changelist number).
Mercurial Destination
- #####################
+ '''''''''''''''''''''
The following options are supported:
@@ -330,8 +328,7 @@ cmdtable = {
('', 'root', '', _('specify cvsroot')),
# Options specific to builtin cvsps
('', 'parents', '', _('show parent changesets')),
- ('', 'ancestors', '',
- _('show current changeset in ancestor branches')),
+ ('', 'ancestors', '', _('show current changeset in ancestor branches')),
# Options that are ignored for compatibility with cvsps-2.1
('A', 'cvs-direct', None, _('ignored for compatibility')),
],
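The ``convert.*`` options documented in this hunk are ordinary hgrc
settings; inside a converter they are read through the ``ui`` config
API. A minimal illustration (the class and attribute names are
hypothetical)::

    class mercurialsourcesketch(object):
        def __init__(self, ui):
            # boolean knob, documented default False
            self.saverev = ui.configbool('convert', 'hg.saverev', False)
            # revision identifier, documented default 0
            self.startrev = ui.config('convert', 'hg.startrev', 0)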
diff --git a/hgext/convert/bzr.py b/hgext/convert/bzr.py
index 5eef902..cc16258 100644
--- a/hgext/convert/bzr.py
+++ b/hgext/convert/bzr.py
@@ -23,7 +23,7 @@ from common import NoRepo, commit, converter_source
try:
# bazaar imports
- from bzrlib import bzrdir, revision, errors
+ from bzrlib import branch, revision, errors
from bzrlib.revisionspec import RevisionSpec
except ImportError:
pass
@@ -42,17 +42,14 @@ class bzr_source(converter_source):
try:
# access bzrlib stuff
- bzrdir
+ branch
except NameError:
raise NoRepo(_('Bazaar modules could not be loaded'))
path = os.path.abspath(path)
self._checkrepotype(path)
- try:
- self.sourcerepo = bzrdir.BzrDir.open(path).open_repository()
- except errors.NoRepositoryPresent:
- raise NoRepo(_('%s does not look like a Bazaar repository')
- % path)
+ self.branch = branch.Branch.open(path)
+ self.sourcerepo = self.branch.repository
self._parentids = {}
def _checkrepotype(self, path):
@@ -72,7 +69,7 @@ class bzr_source(converter_source):
self.ui.warn(_('warning: lightweight checkouts may cause '
'conversion failures, try with a regular '
'branch instead.\n'))
- except Exception:
+ except:
self.ui.note(_('bzr source type could not be determined\n'))
def before(self):
@@ -91,28 +88,16 @@ class bzr_source(converter_source):
def after(self):
self.sourcerepo.unlock()
- def _bzrbranches(self):
- return self.sourcerepo.find_branches(using=True)
-
def getheads(self):
if not self.rev:
- # Set using=True to avoid nested repositories (see issue3254)
- heads = sorted([b.last_revision() for b in self._bzrbranches()])
- else:
- revid = None
- for branch in self._bzrbranches():
- try:
- r = RevisionSpec.from_string(self.rev)
- info = r.in_history(branch)
- except errors.BzrError:
- pass
- revid = info.rev_id
- if revid is None:
- raise util.Abort(_('%s is not a valid revision') % self.rev)
- heads = [revid]
- # Empty repositories return 'null:', which cannot be retrieved
- heads = [h for h in heads if h != 'null:']
- return heads
+ return [self.branch.last_revision()]
+ try:
+ r = RevisionSpec.from_string(self.rev)
+ info = r.in_history(self.branch)
+ except errors.BzrError:
+ raise util.Abort(_('%s is not a valid revision in current branch')
+ % self.rev)
+ return [info.rev_id]
def getfile(self, name, rev):
revtree = self.sourcerepo.revision_tree(rev)
@@ -155,24 +140,20 @@ class bzr_source(converter_source):
parents = self._filterghosts(rev.parent_ids)
self._parentids[version] = parents
- branch = self.recode(rev.properties.get('branch-nick', u'default'))
- if branch == 'trunk':
- branch = 'default'
return commit(parents=parents,
date='%d %d' % (rev.timestamp, -rev.timezone),
author=self.recode(rev.committer),
+ # bzr returns bytestrings or unicode, depending on the content
desc=self.recode(rev.message),
- branch=branch,
rev=version)
def gettags(self):
+ if not self.branch.supports_tags():
+ return {}
+ tagdict = self.branch.tags.get_tag_dict()
bytetags = {}
- for branch in self._bzrbranches():
- if not branch.supports_tags():
- return {}
- tagdict = branch.tags.get_tag_dict()
- for name, rev in tagdict.iteritems():
- bytetags[self.recode(name)] = rev
+ for name, rev in tagdict.iteritems():
+ bytetags[self.recode(name)] = rev
return bytetags
def getchangedfiles(self, rev, i):
@@ -192,14 +173,8 @@ class bzr_source(converter_source):
revid = current._revision_id
changes = []
renames = {}
- seen = set()
- # Process the entries by reverse lexicographic name order to
- # handle nested renames correctly, most specific first.
- curchanges = sorted(current.iter_changes(origin),
- key=lambda c: c[1][0] or c[1][1],
- reverse=True)
for (fileid, paths, changed_content, versioned, parent, name,
- kind, executable) in curchanges:
+ kind, executable) in current.iter_changes(origin):
if paths[0] == u'' or paths[1] == u'':
# ignore changes to tree root
@@ -213,8 +188,7 @@ class bzr_source(converter_source):
# so it can be removed.
changes.append((self.recode(paths[0]), revid))
- if kind[0] == 'directory' and None not in paths:
- renaming = paths[0] != paths[1]
+ if None not in paths and paths[0] != paths[1]:
# neither an add nor an delete - a move
# rename all directory contents manually
subdir = origin.inventory.path2id(paths[0])
@@ -224,16 +198,6 @@ class bzr_source(converter_source):
if entry.kind == 'directory':
continue
frompath = self.recode(paths[0] + '/' + name)
- if frompath in seen:
- # Already handled by a more specific change entry
- # This is important when you have:
- # a => b
- # a/c => a/c
- # Here a/c must not be renamed into b/c
- continue
- seen.add(frompath)
- if not renaming:
- continue
topath = self.recode(paths[1] + '/' + name)
# register the files as changed
changes.append((frompath, revid))
@@ -250,12 +214,7 @@ class bzr_source(converter_source):
continue
# we got unicode paths, need to convert them
- path, topath = paths
- if path is not None:
- path = self.recode(path)
- if topath is not None:
- topath = self.recode(topath)
- seen.add(path or topath)
+ path, topath = [self.recode(part) for part in paths]
if topath is None:
# file deleted
@@ -283,3 +242,19 @@ class bzr_source(converter_source):
parentmap = self.sourcerepo.get_parent_map(ids)
parents = tuple([parent for parent in ids if parent in parentmap])
return parents
+
+ def recode(self, s, encoding=None):
+ """This version of recode tries to encode unicode to bytecode,
+ and preferably using the UTF-8 codec.
+ Other types than Unicode are silently returned, this is by
+ intention, e.g. the None-type is not going to be encoded but instead
+ just passed through
+ """
+ if not encoding:
+ encoding = self.encoding or 'utf-8'
+
+ if isinstance(s, unicode):
+ return s.encode(encoding)
+ else:
+ # leave it alone
+ return s
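The ``recode`` override added at the end of this file does exactly one
job: unicode gets encoded (UTF-8 unless the source specifies otherwise),
and every other type, including None, passes through untouched. A quick
illustrative check::

    >>> recode(u'caf\xe9')        # unicode -> UTF-8 bytes
    'caf\xc3\xa9'
    >>> recode('already-bytes')   # str passes through unchanged
    'already-bytes'
    >>> recode(None)              # as does None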
diff --git a/hgext/convert/common.py b/hgext/convert/common.py
index e30ef2d..29b01b5 100644
--- a/hgext/convert/common.py
+++ b/hgext/convert/common.py
@@ -11,8 +11,6 @@ import cPickle as pickle
from mercurial import util
from mercurial.i18n import _
-propertycache = util.propertycache
-
def encodeargs(args):
def encodearg(s):
lines = base64.encodestring(s)
@@ -76,7 +74,7 @@ class converter_source(object):
def getheads(self):
"""Return a list of this repository's heads"""
- raise NotImplementedError
+ raise NotImplementedError()
def getfile(self, name, rev):
"""Return a pair (data, mode) where data is the file content
@@ -84,7 +82,7 @@ class converter_source(object):
identifier returned by a previous call to getchanges(). Raise
IOError to indicate that name was deleted in rev.
"""
- raise NotImplementedError
+ raise NotImplementedError()
def getchanges(self, version):
"""Returns a tuple of (files, copies).
@@ -95,18 +93,18 @@ class converter_source(object):
copies is a dictionary of dest: source
"""
- raise NotImplementedError
+ raise NotImplementedError()
def getcommit(self, version):
"""Return the commit object for version"""
- raise NotImplementedError
+ raise NotImplementedError()
def gettags(self):
"""Return the tags as a dictionary of name: revision
Tag names must be UTF-8 strings.
"""
- raise NotImplementedError
+ raise NotImplementedError()
def recode(self, s, encoding=None):
if not encoding:
@@ -116,10 +114,10 @@ class converter_source(object):
return s.encode("utf-8")
try:
return s.decode(encoding).encode("utf-8")
- except UnicodeError:
+ except:
try:
return s.decode("latin-1").encode("utf-8")
- except UnicodeError:
+ except:
return s.decode(encoding, "replace").encode("utf-8")
def getchangedfiles(self, rev, i):
@@ -133,7 +131,7 @@ class converter_source(object):
This function is only needed to support --filemap
"""
- raise NotImplementedError
+ raise NotImplementedError()
def converted(self, rev, sinkrev):
'''Notify the source that a revision has been converted.'''
@@ -175,13 +173,13 @@ class converter_sink(object):
def getheads(self):
"""Return a list of this repository's heads"""
- raise NotImplementedError
+ raise NotImplementedError()
def revmapfile(self):
"""Path to a file that will contain lines
source_rev_id sink_rev_id
mapping equivalent revision identifiers for each system."""
- raise NotImplementedError
+ raise NotImplementedError()
def authorfile(self):
"""Path to a file that will contain lines
@@ -203,7 +201,7 @@ class converter_sink(object):
a particular revision (or even what that revision would be)
before it receives the file data.
"""
- raise NotImplementedError
+ raise NotImplementedError()
def puttags(self, tags):
"""Put tags into sink.
@@ -212,7 +210,7 @@ class converter_sink(object):
Return a pair (tag_revision, tag_parent_revision), or (None, None)
if nothing was changed.
"""
- raise NotImplementedError
+ raise NotImplementedError()
def setbranch(self, branch, pbranches):
"""Set the current branch name. Called before the first putcommit
@@ -245,10 +243,6 @@ class converter_sink(object):
"""
pass
- def hascommit(self, rev):
- """Return True if the sink contains rev"""
- raise NotImplementedError
-
class commandline(object):
def __init__(self, ui, command):
self.ui = ui
@@ -327,13 +321,15 @@ class commandline(object):
self.checkexit(status, ''.join(output))
return output
- @propertycache
- def argmax(self):
+ def getargmax(self):
+ if '_argmax' in self.__dict__:
+ return self._argmax
+
# POSIX requires at least 4096 bytes for ARG_MAX
- argmax = 4096
+ self._argmax = 4096
try:
- argmax = os.sysconf("SC_ARG_MAX")
- except (AttributeError, ValueError):
+ self._argmax = os.sysconf("SC_ARG_MAX")
+ except:
pass
# Windows shells impose their own limits on command line length,
@@ -343,11 +339,13 @@ class commandline(object):
# Since ARG_MAX is for command line _and_ environment, lower our limit
# (and make happy Windows shells while doing this).
- return argmax // 2 - 1
+
+ self._argmax = self._argmax / 2 - 1
+ return self._argmax
def limit_arglist(self, arglist, cmd, closestdin, *args, **kwargs):
cmdlen = len(self._cmdline(cmd, closestdin, *args, **kwargs))
- limit = self.argmax - cmdlen
+ limit = self.getargmax() - cmdlen
bytes = 0
fl = []
for fn in arglist:
@@ -385,12 +383,8 @@ class mapfile(dict):
raise
return
for i, line in enumerate(fp):
- line = line.splitlines()[0].rstrip()
- if not line:
- # Ignore blank lines
- continue
try:
- key, value = line.rsplit(' ', 1)
+ key, value = line.splitlines()[0].rsplit(' ', 1)
except ValueError:
raise util.Abort(
_('syntax error in %s(%d): key/value pair expected')
@@ -415,31 +409,3 @@ class mapfile(dict):
if self.fp:
self.fp.close()
self.fp = None
-
-def parsesplicemap(path):
- """Parse a splicemap, return a child/parents dictionary."""
- if not path:
- return {}
- m = {}
- try:
- fp = open(path, 'r')
- for i, line in enumerate(fp):
- line = line.splitlines()[0].rstrip()
- if not line:
- # Ignore blank lines
- continue
- try:
- child, parents = line.split(' ', 1)
- parents = parents.replace(',', ' ').split()
- except ValueError:
- raise util.Abort(_('syntax error in %s(%d): child parent1'
- '[,parent2] expected') % (path, i + 1))
- pp = []
- for p in parents:
- if p not in pp:
- pp.append(p)
- m[child] = pp
- except IOError, e:
- if e.errno != errno.ENOENT:
- raise
- return m
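The ``getargmax`` logic restored above computes a per-OS command-line
length budget: start from the 4096-byte POSIX floor, prefer the real
``SC_ARG_MAX`` when the platform exposes it, then halve the result since
ARG_MAX covers the environment as well. Standalone, with the narrower
exception handling from the removed ``argmax`` property::

    import os

    def getargmax():
        # POSIX requires at least 4096 bytes for ARG_MAX
        argmax = 4096
        try:
            argmax = os.sysconf("SC_ARG_MAX")
        except (AttributeError, ValueError):
            pass
        # ARG_MAX is for command line _and_ environment; keep half
        return argmax // 2 - 1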
diff --git a/hgext/convert/convcmd.py b/hgext/convert/convcmd.py
index c8fe845..578272a 100644
--- a/hgext/convert/convcmd.py
+++ b/hgext/convert/convcmd.py
@@ -15,7 +15,7 @@ from monotone import monotone_source
from gnuarch import gnuarch_source
from bzr import bzr_source
from p4 import p4_source
-import filemap, common
+import filemap
import os, shutil
from mercurial import hg, util, encoding
@@ -118,7 +118,7 @@ class converter(object):
self.readauthormap(opts.get('authormap'))
self.authorfile = self.dest.authorfile()
- self.splicemap = common.parsesplicemap(opts.get('splicemap'))
+ self.splicemap = mapfile(ui, opts.get('splicemap'))
self.branchmap = mapfile(ui, opts.get('branchmap'))
def walktree(self, heads):
@@ -142,29 +142,6 @@ class converter(object):
return parents
- def mergesplicemap(self, parents, splicemap):
- """A splicemap redefines child/parent relationships. Check the
- map contains valid revision identifiers and merge the new
- links in the source graph.
- """
- for c in splicemap:
- if c not in parents:
- if not self.dest.hascommit(self.map.get(c, c)):
- # Could be in source but not converted during this run
- self.ui.warn(_('splice map revision %s is not being '
- 'converted, ignoring\n') % c)
- continue
- pc = []
- for p in splicemap[c]:
- # We do not have to wait for nodes already in dest.
- if self.dest.hascommit(self.map.get(p, p)):
- continue
- # Parent is not in dest and not being converted, not good
- if p not in parents:
- raise util.Abort(_('unknown splice map parent: %s') % p)
- pc.append(p)
- parents[c] = pc
-
def toposort(self, parents, sortmode):
'''Return an ordering such that every uncommitted changeset is
preceeded by all its uncommitted ancestors.'''
@@ -190,7 +167,7 @@ class converter(object):
children.setdefault(n, [])
hasparent = False
for p in parents[n]:
- if p not in self.map:
+ if not p in self.map:
visit.append(p)
hasparent = True
children.setdefault(p, []).append(n)
@@ -280,7 +257,7 @@ class converter(object):
def writeauthormap(self):
authorfile = self.authorfile
if authorfile:
- self.ui.status(_('writing author map file %s\n') % authorfile)
+ self.ui.status(_('Writing author map file %s\n') % authorfile)
ofile = open(authorfile, 'w+')
for author in self.authors:
ofile.write("%s=%s\n" % (author, self.authors[author]))
@@ -297,7 +274,7 @@ class converter(object):
try:
srcauthor, dstauthor = line.split('=', 1)
except ValueError:
- msg = _('ignoring bad line in author map file %s: %s\n')
+ msg = _('Ignoring bad line in author map file %s: %s\n')
self.ui.warn(msg % (authorfile, line.rstrip()))
continue
@@ -342,7 +319,7 @@ class converter(object):
self.commitcache[prev].branch))
self.dest.setbranch(commit.branch, pbranches)
try:
- parents = self.splicemap[rev]
+ parents = self.splicemap[rev].replace(',', ' ').split()
self.ui.status(_('spliced in %s as parents of %s\n') %
(parents, rev))
parents = [self.map.get(p, p) for p in parents]
@@ -363,7 +340,6 @@ class converter(object):
self.ui.status(_("scanning source...\n"))
heads = self.source.getheads()
parents = self.walktree(heads)
- self.mergesplicemap(parents, self.splicemap)
self.ui.status(_("sorting...\n"))
t = self.toposort(parents, sortmode)
num = len(t)
@@ -462,7 +438,7 @@ def convert(ui, src, dest=None, revmapfile=None, **opts):
if not revmapfile:
try:
revmapfile = destc.revmapfile()
- except Exception:
+ except:
revmapfile = os.path.join(destc, "map")
c = converter(ui, srcc, destc, revmapfile, opts)
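With ``parsesplicemap`` gone, a splicemap entry is kept as its raw value
string and split lazily when the revision is committed, so comma- and
space-separated parent lists both work::

    value = 'deadbeef,cafebabe'       # hypothetical splicemap value
    parents = value.replace(',', ' ').split()
    # -> ['deadbeef', 'cafebabe']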
diff --git a/hgext/convert/cvs.py b/hgext/convert/cvs.py
index 38b1d34..d07ea20 100644
--- a/hgext/convert/cvs.py
+++ b/hgext/convert/cvs.py
@@ -70,7 +70,7 @@ class convert_cvs(converter_source):
cs.author = self.recode(cs.author)
self.lastbranch[cs.branch] = id
cs.comment = self.recode(cs.comment)
- date = util.datestr(cs.date, '%Y-%m-%d %H:%M:%S %1%2')
+ date = util.datestr(cs.date)
self.tags.update(dict.fromkeys(cs.tags, id))
files = {}
@@ -121,13 +121,12 @@ class convert_cvs(converter_source):
pf = open(cvspass)
for line in pf.read().splitlines():
part1, part2 = line.split(' ', 1)
- # /1 :pserver:user@example.com:2401/cvsroot/foo
- # Ah<Z
if part1 == '/1':
+ # /1 :pserver:user@example.com:2401/cvsroot/foo Ah<Z
part1, part2 = part2.split(' ', 1)
format = format1
- # :pserver:user@example.com:/cvsroot/foo Ah<Z
else:
+ # :pserver:user@example.com:/cvsroot/foo Ah<Z
format = format0
if part1 == format:
passw = part2
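The ``.cvspass`` parsing in this hunk handles the two on-disk formats
its comments describe: newer CVS prefixes each entry with a ``/1``
version marker, older CVS starts directly with the root. A compact
sketch of that branch::

    def parsepassline(line, format0, format1):
        # new: '/1 :pserver:user@example.com:2401/cvsroot/foo Ah<Z'
        # old: ':pserver:user@example.com:/cvsroot/foo Ah<Z'
        part1, part2 = line.split(' ', 1)
        if part1 == '/1':
            part1, part2 = part2.split(' ', 1)
            fmt = format1
        else:
            fmt = format0
        if part1 == fmt:
            return part2  # the scrambled password
        return None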
diff --git a/hgext/convert/cvsps.py b/hgext/convert/cvsps.py
index 97184d5..1519d41 100644
--- a/hgext/convert/cvsps.py
+++ b/hgext/convert/cvsps.py
@@ -11,7 +11,6 @@ import cPickle as pickle
from mercurial import util
from mercurial.i18n import _
from mercurial import hook
-from mercurial import util
class logentry(object):
'''Class logentry has the following attributes:
@@ -336,8 +335,7 @@ def createlog(ui, directory=None, root="", rlog=True, cache=None):
else:
myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
branches = [b for b in branchmap if branchmap[b] == myrev]
- assert len(branches) == 1, ('unknown branch: %s'
- % e.mergepoint)
+ assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
e.mergepoint = branches[0]
else:
e.mergepoint = None
@@ -364,14 +362,8 @@ def createlog(ui, directory=None, root="", rlog=True, cache=None):
elif state == 8:
# store commit log message
if re_31.match(line):
- cpeek = peek
- if cpeek.endswith('\n'):
- cpeek = cpeek[:-1]
- if re_50.match(cpeek):
- state = 5
- store = True
- else:
- e.comment.append(line)
+ state = 5
+ store = True
elif re_32.match(line):
state = 0
store = True
@@ -521,8 +513,8 @@ def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
e.comment == c.comment and
e.author == c.author and
e.branch == c.branch and
- (not util.safehasattr(e, 'branchpoints') or
- not util.safehasattr (c, 'branchpoints') or
+ (not hasattr(e, 'branchpoints') or
+ not hasattr (c, 'branchpoints') or
e.branchpoints == c.branchpoints) and
((c.date[0] + c.date[1]) <=
(e.date[0] + e.date[1]) <=
@@ -557,25 +549,27 @@ def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
# Sort files in each changeset
- def entitycompare(l, r):
- 'Mimic cvsps sorting order'
- l = l.file.split('/')
- r = r.file.split('/')
- nl = len(l)
- nr = len(r)
- n = min(nl, nr)
- for i in range(n):
- if i + 1 == nl and nl < nr:
- return -1
- elif i + 1 == nr and nl > nr:
- return +1
- elif l[i] < r[i]:
- return -1
- elif l[i] > r[i]:
- return +1
- return 0
-
for c in changesets:
+ def pathcompare(l, r):
+ 'Mimic cvsps sorting order'
+ l = l.split('/')
+ r = r.split('/')
+ nl = len(l)
+ nr = len(r)
+ n = min(nl, nr)
+ for i in range(n):
+ if i + 1 == nl and nl < nr:
+ return -1
+ elif i + 1 == nr and nl > nr:
+ return +1
+ elif l[i] < r[i]:
+ return -1
+ elif l[i] > r[i]:
+ return +1
+ return 0
+ def entitycompare(l, r):
+ return pathcompare(l.file, r.file)
+
c.entries.sort(entitycompare)
# Sort changesets by date
@@ -706,11 +700,11 @@ def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
if mergeto:
m = mergeto.search(c.comment)
if m:
- if m.groups():
+ try:
m = m.group(1)
if m == 'HEAD':
m = None
- else:
+ except:
m = None # if no group found then merge to HEAD
if m in branches and c.branch != m:
# insert empty changeset for merge
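``pathcompare`` mimics cvsps ordering: paths are compared component by
component, but a path that exhausts its components first wins, so files
in a directory sort ahead of anything in its subdirectories. Using the
function as defined in the hunk above::

    >>> pathcompare('src/zebra.c', 'src/util/a.c')   # file before subdir
    -1
    >>> pathcompare('src/a.c', 'src/b.c')            # else componentwise
    -1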
diff --git a/hgext/convert/darcs.py b/hgext/convert/darcs.py
index b10a533..38d79ba 100644
--- a/hgext/convert/darcs.py
+++ b/hgext/convert/darcs.py
@@ -24,7 +24,7 @@ except ImportError:
try:
from elementtree.ElementTree import ElementTree, XMLParser
except ImportError:
- pass
+ ElementTree = None
class darcs_source(converter_source, commandline):
def __init__(self, ui, path, rev=None):
@@ -42,7 +42,7 @@ class darcs_source(converter_source, commandline):
raise util.Abort(_('darcs version 2.1 or newer needed (found %r)') %
version)
- if "ElementTree" not in globals():
+ if ElementTree is None:
raise util.Abort(_("Python ElementTree module is not available"))
self.path = os.path.realpath(path)
@@ -139,7 +139,7 @@ class darcs_source(converter_source, commandline):
# etree can return unicode objects for name, comment, and author,
# so recode() is used to ensure str objects are emitted.
return commit(author=self.recode(elt.get('author')),
- date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
+ date=util.datestr(date),
desc=self.recode(desc).strip(),
parents=self.parents[rev])
diff --git a/hgext/convert/filemap.py b/hgext/convert/filemap.py
index c14df16..34033c7 100644
--- a/hgext/convert/filemap.py
+++ b/hgext/convert/filemap.py
@@ -99,8 +99,6 @@ class filemapper(object):
if newpre == '.':
return suf
if suf:
- if newpre.endswith('/'):
- return newpre + suf
return newpre + '/' + suf
return newpre
return name
@@ -294,34 +292,23 @@ class filemap_source(converter_source):
# A parent p is interesting if its mapped version (self.parentmap[p]):
# - is not SKIPREV
# - is still not in the list of parents (we don't want duplicates)
- # - is not an ancestor of the mapped versions of the other parents or
- # there is no parent in the same branch than the current revision.
+ # - is not an ancestor of the mapped versions of the other parents
mparents = []
- knownparents = set()
- branch = self.commits[rev].branch
- hasbranchparent = False
+ wp = None
for i, p1 in enumerate(parents):
mp1 = self.parentmap[p1]
- if mp1 == SKIPREV or mp1 in knownparents:
+ if mp1 == SKIPREV or mp1 in mparents:
continue
- isancestor = util.any(p2 for p2 in parents
- if p1 != p2 and mp1 != self.parentmap[p2]
- and mp1 in self.wantedancestors[p2])
- if not isancestor and not hasbranchparent and len(parents) > 1:
- # This could be expensive, avoid unnecessary calls.
- if self._cachedcommit(p1).branch == branch:
- hasbranchparent = True
- mparents.append((p1, mp1, i, isancestor))
- knownparents.add(mp1)
- # Discard parents ancestors of other parents if there is a
- # non-ancestor one on the same branch than current revision.
- if hasbranchparent:
- mparents = [p for p in mparents if not p[3]]
- wp = None
- if mparents:
- wp = max(p[2] for p in mparents)
- mparents = [p[1] for p in mparents]
- elif parents:
+ for p2 in parents:
+ if p1 == p2 or mp1 == self.parentmap[p2]:
+ continue
+ if mp1 in self.wantedancestors[p2]:
+ break
+ else:
+ mparents.append(mp1)
+ wp = i
+
+ if wp is None and parents:
wp = 0
self.origparents[rev] = parents
@@ -330,6 +317,7 @@ class filemap_source(converter_source):
if 'close' in self.commits[rev].extra:
# A branch closing revision is only useful if one of its
# parents belong to the branch being closed
+ branch = self.commits[rev].branch
pbranches = [self._cachedcommit(p).branch for p in mparents]
if branch in pbranches:
closed = True
@@ -357,12 +345,13 @@ class filemap_source(converter_source):
# able to get the files later on in getfile, we hide the
# original filename in the rev part of the return value.
changes, copies = self.base.getchanges(rev)
- files = {}
+ newnames = {}
+ files = []
for f, r in changes:
newf = self.filemapper(f)
- if newf and (newf != f or newf not in files):
- files[newf] = (f, r)
- files = sorted(files.items())
+ if newf:
+ files.append((newf, (f, r)))
+ newnames[f] = newf
ncopies = {}
for c in copies:
@@ -386,6 +375,3 @@ class filemap_source(converter_source):
def lookuprev(self, rev):
return self.base.lookuprev(rev)
-
- def getbookmarks(self):
- return self.base.getbookmarks()
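The prefix-mapping fragment at the top of this diff rewrites a path by
swapping a matched prefix: ``newpre == '.'`` means "move to tree root",
otherwise the remaining suffix is rejoined with ``/``. A rough sketch
(how ``suf`` is derived is not shown in the hunk, so the stripping here
is an assumption)::

    def remap(pre, newpre, name):
        # replace prefix 'pre' of 'name' with 'newpre'
        suf = name[len(pre):].lstrip('/')   # assumed suffix computation
        if newpre == '.':
            return suf
        if suf:
            return newpre + '/' + suf
        return newpre

    # remap('lib', 'src', 'lib/util.py') -> 'src/util.py'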
diff --git a/hgext/convert/git.py b/hgext/convert/git.py
index 8058399..e35e128 100644
--- a/hgext/convert/git.py
+++ b/hgext/convert/git.py
@@ -16,7 +16,7 @@ class convert_git(converter_source):
# Windows does not support the GIT_DIR= construct while other
# systems cannot remove an environment variable. Just assume no
# system has both issues.
- if util.safehasattr(os, 'unsetenv'):
+ if hasattr(os, 'unsetenv'):
def gitopen(self, s, noerr=False):
prevgitdir = os.environ.get('GIT_DIR')
os.environ['GIT_DIR'] = self.path
@@ -69,7 +69,7 @@ class convert_git(converter_source):
def catfile(self, rev, type):
if rev == hex(nullid):
- raise IOError
+ raise IOError()
data, ret = self.gitread("git cat-file %s %s" % (type, rev))
if ret:
raise util.Abort(_('cannot read %r object at %s') % (type, rev))
@@ -97,8 +97,6 @@ class convert_git(converter_source):
seen.add(f)
entry = entry.split()
h = entry[3]
- if entry[1] == '160000':
- raise util.Abort('git submodules are not supported!')
p = (entry[1] == "100755")
s = (entry[1] == "120000")
self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
@@ -145,30 +143,20 @@ class convert_git(converter_source):
def gettags(self):
tags = {}
- alltags = {}
fh = self.gitopen('git ls-remote --tags "%s"' % self.path)
prefix = 'refs/tags/'
-
- # Build complete list of tags, both annotated and bare ones
for line in fh:
line = line.strip()
+ if not line.endswith("^{}"):
+ continue
node, tag = line.split(None, 1)
if not tag.startswith(prefix):
continue
- alltags[tag[len(prefix):]] = node
+ tag = tag[len(prefix):-3]
+ tags[tag] = node
if fh.close():
raise util.Abort(_('cannot read tags from %s') % self.path)
- # Filter out tag objects for annotated tag refs
- for tag in alltags:
- if tag.endswith('^{}'):
- tags[tag[:-3]] = alltags[tag]
- else:
- if tag + '^{}' in alltags:
- continue
- else:
- tags[tag] = alltags[tag]
-
return tags
def getchangedfiles(self, version, i):
@@ -181,8 +169,8 @@ class convert_git(converter_source):
m, f = l[:-1].split("\t")
changes.append(f)
else:
- fh = self.gitopen('git diff-tree --name-only --root -r %s '
- '"%s^%s" --' % (version, version, i + 1))
+ fh = self.gitopen('git diff-tree --name-only --root -r %s "%s^%s" --'
+ % (version, version, i + 1))
changes = [f.rstrip('\n') for f in fh]
if fh.close():
raise util.Abort(_('cannot read changes in %s') % version)
@@ -211,7 +199,7 @@ class convert_git(converter_source):
continue
name = '%s%s' % (reftype, name[prefixlen:])
bookmarks[name] = rev
- except Exception:
+ except:
pass
return bookmarks
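
The gettags hunk above drops the two-pass handling of annotated versus
lightweight tags: only peeled ``^{}`` entries from ``git ls-remote
--tags`` are kept, with the suffix sliced off along with the
``refs/tags/`` prefix. A sketch of that parsing on canned input (the
hashes are made up)::

    def parse_tags(lines, prefix='refs/tags/'):
        tags = {}
        for line in lines:
            line = line.strip()
            if not line.endswith('^{}'):
                continue                      # skip non-peeled entries
            node, tag = line.split(None, 1)
            if not tag.startswith(prefix):
                continue
            tags[tag[len(prefix):-3]] = node  # strip prefix and '^{}'
        return tags

    sample = [
        '1111111111111111111111111111111111111111\trefs/tags/v1.0',
        '2222222222222222222222222222222222222222\trefs/tags/v1.0^{}',
    ]
    print parse_tags(sample)   # {'v1.0': '2222...'}

Lightweight tags never produce a ``^{}`` line, so this simpler form only
reports annotated tags.
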
diff --git a/hgext/convert/hg.py b/hgext/convert/hg.py
index 287c771..26c43a5 100644
--- a/hgext/convert/hg.py
+++ b/hgext/convert/hg.py
@@ -70,10 +70,10 @@ class mercurial_sink(converter_sink):
self.wlock.release()
def revmapfile(self):
- return self.repo.join("shamap")
+ return os.path.join(self.path, ".hg", "shamap")
def authorfile(self):
- return self.repo.join("authormap")
+ return os.path.join(self.path, ".hg", "authormap")
def getheads(self):
h = self.repo.changelog.heads()
@@ -95,7 +95,7 @@ class mercurial_sink(converter_sink):
self.after()
try:
self.repo = hg.repository(self.ui, branchpath)
- except Exception:
+ except:
self.repo = hg.repository(self.ui, branchpath, create=True)
self.before()
@@ -105,7 +105,7 @@ class mercurial_sink(converter_sink):
for b in pbranches:
try:
self.repo.lookup(b[0])
- except Exception:
+ except:
missings.setdefault(b[1], []).append(b[0])
if missings:
@@ -178,7 +178,7 @@ class mercurial_sink(converter_sink):
closed = 'close' in commit.extra
if not closed and not man.cmp(m1node, man.revision(mnode)):
self.ui.status(_("filtering out empty revision\n"))
- self.repo.rollback(force=True)
+ self.repo.rollback()
return parent
return p2
@@ -192,7 +192,7 @@ class mercurial_sink(converter_sink):
try:
oldlines = sorted(parentctx['.hgtags'].data().splitlines(True))
- except Exception:
+ except:
oldlines = []
newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
@@ -223,12 +223,6 @@ class mercurial_sink(converter_sink):
self.repo._bookmarks[bookmark] = bin(updatedbookmark[bookmark])
bookmarks.write(self.repo)
- def hascommit(self, rev):
- if rev not in self.repo and self.clonebranches:
- raise util.Abort(_('revision %s not found in destination '
- 'repository (lookups with clonebranches=true '
- 'are not implemented)') % rev)
- return rev in self.repo
class mercurial_source(converter_source):
def __init__(self, ui, path, rev=None):
@@ -241,7 +235,7 @@ class mercurial_source(converter_source):
# try to provoke an exception if this isn't really a hg
# repo, but some other bogus compatible-looking url
if not self.repo.local():
- raise error.RepoError
+ raise error.RepoError()
except error.RepoError:
ui.traceback()
raise NoRepo(_("%s is not a local Mercurial repository") % path)
@@ -259,7 +253,7 @@ class mercurial_source(converter_source):
% startnode)
startrev = self.repo.changelog.rev(startnode)
children = {startnode: 1}
- for rev in self.repo.changelog.descendants([startrev]):
+ for rev in self.repo.changelog.descendants(startrev):
children[self.repo.changelog.node(rev)] = 1
self.keep = children.__contains__
else:
@@ -294,8 +288,7 @@ class mercurial_source(converter_source):
if not parents:
files = sorted(ctx.manifest())
# getcopies() is not needed for roots, but it is a simple way to
- # detect missing revlogs and abort on errors or populate
- # self.ignored
+ # detect missing revlogs and abort on errors or populate self.ignored
self.getcopies(ctx, parents, files)
return [(f, rev) for f in files if f not in self.ignored], {}
if self._changescache and self._changescache[0] == rev:
@@ -343,8 +336,7 @@ class mercurial_source(converter_source):
crev = rev
else:
crev = None
- return commit(author=ctx.user(),
- date=util.datestr(ctx.date(), '%Y-%m-%d %H:%M:%S %1%2'),
+ return commit(author=ctx.user(), date=util.datestr(ctx.date()),
desc=ctx.description(), rev=crev, parents=parents,
branch=ctx.branch(), extra=ctx.extra(),
sortkey=ctx.rev())
@@ -372,7 +364,8 @@ class mercurial_source(converter_source):
def converted(self, rev, destrev):
if self.convertfp is None:
- self.convertfp = open(self.repo.join('shamap'), 'a')
+ self.convertfp = open(os.path.join(self.path, '.hg', 'shamap'),
+ 'a')
self.convertfp.write('%s %s\n' % (destrev, rev))
self.convertfp.flush()
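
As the converted() hunk above shows, the shamap is a plain text file
with one ``<converted-node> <source-revision>`` pair per line, appended
and flushed as each revision is mapped. A minimal sketch of reading one
back (a hypothetical helper; the layout is inferred from the write
above)::

    def readshamap(path):
        mapping = {}
        for line in open(path):
            line = line.rstrip('\n')
            if not line:
                continue
            destrev, srcrev = line.split(' ', 1)
            mapping[srcrev] = destrev
        return mapping
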
diff --git a/hgext/convert/monotone.py b/hgext/convert/monotone.py
index 969e0e5..c951089 100644
--- a/hgext/convert/monotone.py
+++ b/hgext/convert/monotone.py
@@ -30,7 +30,7 @@ class monotone_source(converter_source, commandline):
f = file(path, 'rb')
header = f.read(16)
f.close()
- except IOError:
+ except:
header = ''
if header != 'SQLite format 3\x00':
raise norepo
@@ -113,7 +113,7 @@ class monotone_source(converter_source, commandline):
stream = self.mtnreadfp.read(1)
if stream not in 'mewptl':
- raise util.Abort(_('bad mtn packet - bad stream type %s') % stream)
+ raise util.Abort(_('bad mtn packet - bad stream type %s' % stream))
read = self.mtnreadfp.read(1)
if read != ':':
@@ -283,11 +283,11 @@ class monotone_source(converter_source, commandline):
def getfile(self, name, rev):
if not self.mtnisfile(name, rev):
- raise IOError # file was deleted or renamed
+ raise IOError() # file was deleted or renamed
try:
data = self.mtnrun("get_file_of", name, r=rev)
- except Exception:
- raise IOError # file was deleted or renamed
+ except:
+ raise IOError() # file was deleted or renamed
self.mtnloadmanifest(rev)
node, attr = self.files.get(name, (None, ""))
return data, attr
@@ -317,7 +317,7 @@ class monotone_source(converter_source, commandline):
def getchangedfiles(self, rev, i):
# This function is only needed to support --filemap
# ... and we don't support that
- raise NotImplementedError
+ raise NotImplementedError()
def before(self):
# Check if we have a new enough version to use automate stdio
diff --git a/hgext/convert/p4.py b/hgext/convert/p4.py
index 76b28e7..5d640ad 100644
--- a/hgext/convert/p4.py
+++ b/hgext/convert/p4.py
@@ -119,8 +119,7 @@ class p4_source(converter_source):
parents = []
date = (int(d["time"]), 0) # timezone not set
- c = commit(author=self.recode(d["user"]),
- date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
+ c = commit(author=self.recode(d["user"]), date=util.datestr(date),
parents=parents, desc=desc, branch='',
extra={"p4": change})
diff --git a/hgext/convert/subversion.py b/hgext/convert/subversion.py
index 094988b..3e64ce6 100644
--- a/hgext/convert/subversion.py
+++ b/hgext/convert/subversion.py
@@ -2,14 +2,17 @@
#
# Copyright(C) 2007 Daniel Holth et al
-import os, re, sys, tempfile, urllib, urllib2, xml.dom.minidom
+import os
+import re
+import sys
import cPickle as pickle
+import tempfile
+import urllib
+import urllib2
from mercurial import strutil, scmutil, util, encoding
from mercurial.i18n import _
-propertycache = util.propertycache
-
# Subversion stuff. Works best with very recent Python SVN bindings
# e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
# these bindings.
@@ -47,21 +50,10 @@ def revsplit(rev):
mod = '/' + parts[1]
return parts[0][4:], mod, int(revnum)
-def quote(s):
- # As of svn 1.7, many svn calls expect "canonical" paths. In
- # theory, we should call svn.core.*canonicalize() on all paths
- # before passing them to the API. Instead, we assume the base url
- # is canonical and copy the behaviour of svn URL encoding function
- # so we can extend it safely with new components. The "safe"
- # characters were taken from the "svn_uri__char_validity" table in
- # libsvn_subr/path.c.
- return urllib.quote(s, "!$&'()*+,-./:=@_~")
-
def geturl(path):
try:
return svn.client.url_from_path(svn.core.svn_path_canonicalize(path))
except SubversionException:
- # svn.client.url_from_path() fails with local repositories
pass
if os.path.isdir(path):
path = os.path.normpath(os.path.abspath(path))
@@ -70,8 +62,8 @@ def geturl(path):
# Module URL is later compared with the repository URL returned
# by svn API, which is UTF-8.
path = encoding.tolocal(path)
- path = 'file://%s' % quote(path)
- return svn.core.svn_path_canonicalize(path)
+ return 'file://%s' % urllib.quote(path)
+ return path
def optrev(number):
optrev = svn.core.svn_opt_revision_t()
@@ -85,8 +77,8 @@ class changedpath(object):
self.copyfrom_rev = p.copyfrom_rev
self.action = p.action
-def get_log_child(fp, url, paths, start, end, limit=0,
- discover_changed_paths=True, strict_node_history=False):
+def get_log_child(fp, url, paths, start, end, limit=0, discover_changed_paths=True,
+ strict_node_history=False):
protocol = -1
def receiver(orig_paths, revnum, author, date, message, pool):
if orig_paths is not None:
@@ -103,11 +95,11 @@ def get_log_child(fp, url, paths, start, end, limit=0,
discover_changed_paths,
strict_node_history,
receiver)
+ except SubversionException, (inst, num):
+ pickle.dump(num, fp, protocol)
except IOError:
# Caller may interrupt the iteration
pickle.dump(None, fp, protocol)
- except Exception, inst:
- pickle.dump(str(inst), fp, protocol)
else:
pickle.dump(None, fp, protocol)
fp.close()
@@ -120,10 +112,6 @@ def debugsvnlog(ui, **opts):
"""Fetch SVN log in a subprocess and channel them back to parent to
avoid memory collection issues.
"""
- if svn is None:
- raise util.Abort(_('debugsvnlog could not load Subversion python '
- 'bindings'))
-
util.setbinary(sys.stdin)
util.setbinary(sys.stdout)
args = decodeargs(sys.stdin.read())
@@ -143,10 +131,10 @@ class logstream(object):
' hg executable is in PATH'))
try:
orig_paths, revnum, author, date, message = entry
- except (TypeError, ValueError):
+ except:
if entry is None:
break
- raise util.Abort(_("log stream exception '%s'") % entry)
+ raise SubversionException("child raised exception", entry)
yield entry
def close(self):
@@ -180,7 +168,7 @@ def httpcheck(ui, path, proto):
'know better.\n'))
return True
data = inst.fp.read()
- except Exception:
+ except:
# Could be urllib2.URLError if the URL is invalid or anything else.
return False
return '<m:human-readable errcode="160013">' in data
@@ -193,15 +181,12 @@ def issvnurl(ui, url):
try:
proto, path = url.split('://', 1)
if proto == 'file':
- if (os.name == 'nt' and path[:1] == '/' and path[1:2].isalpha()
- and path[2:6].lower() == '%3a/'):
- path = path[:2] + ':/' + path[6:]
path = urllib.url2pathname(path)
except ValueError:
proto = 'file'
path = os.path.abspath(url)
if proto == 'file':
- path = util.pconvert(path)
+ path = path.replace(os.sep, '/')
check = protomap.get(proto, lambda *args: False)
while '/' in path:
if check(ui, path, proto):
@@ -234,7 +219,7 @@ class svn_source(converter_source):
raise NoRepo(_("%s does not look like a Subversion repository")
% url)
if svn is None:
- raise MissingTool(_('could not load Subversion python bindings'))
+ raise MissingTool(_('Could not load Subversion python bindings'))
try:
version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
@@ -283,8 +268,7 @@ class svn_source(converter_source):
except ValueError:
raise util.Abort(_('svn: revision %s is not an integer') % rev)
- self.trunkname = self.ui.config('convert', 'svn.trunk',
- 'trunk').strip('/')
+ self.trunkname = self.ui.config('convert', 'svn.trunk', 'trunk').strip('/')
self.startrev = self.ui.config('convert', 'svn.startrev', default=0)
try:
self.startrev = int(self.startrev)
@@ -322,7 +306,7 @@ class svn_source(converter_source):
def exists(self, path, optrev):
try:
- svn.client.ls(self.url.rstrip('/') + '/' + quote(path),
+ svn.client.ls(self.url.rstrip('/') + '/' + urllib.quote(path),
optrev, False, self.ctx)
return True
except SubversionException:
@@ -374,7 +358,7 @@ class svn_source(converter_source):
# Check if branches bring a few more heads to the list
if branches:
rpath = self.url.strip('/')
- branchnames = svn.client.ls(rpath + '/' + quote(branches),
+ branchnames = svn.client.ls(rpath + '/' + urllib.quote(branches),
rev, False, self.ctx)
for branch in branchnames.keys():
module = '%s/%s/%s' % (oldmodule, branches, branch)
@@ -410,7 +394,7 @@ class svn_source(converter_source):
else:
# Perform a full checkout on roots
uuid, module, revnum = revsplit(rev)
- entries = svn.client.ls(self.baseurl + quote(module),
+ entries = svn.client.ls(self.baseurl + urllib.quote(module),
optrev(revnum), True, self.ctx)
files = [n for n, e in entries.iteritems()
if e.kind == svn.core.svn_node_file]
@@ -444,8 +428,6 @@ class svn_source(converter_source):
if revnum < stop:
stop = revnum + 1
self._fetch_revisions(revnum, stop)
- if rev not in self.commits:
- raise util.Abort(_('svn: revision %s not found') % revnum)
commit = self.commits[rev]
# caller caches the result, so free it here to release memory
del self.commits[rev]
@@ -519,11 +501,11 @@ class svn_source(converter_source):
and not p[2].startswith(badroot + '/')]
# Tell tag renamings from tag creations
- renamings = []
+ remainings = []
for source, sourcerev, dest in pendings:
tagname = dest.split('/')[-1]
if source.startswith(srctagspath):
- renamings.append([source, sourcerev, tagname])
+ remainings.append([source, sourcerev, tagname])
continue
if tagname in tags:
# Keep the latest tag value
@@ -539,7 +521,7 @@ class svn_source(converter_source):
# but were really created in the tag
# directory.
pass
- pendings = renamings
+ pendings = remainings
tagspath = srctagspath
finally:
stream.close()
@@ -560,47 +542,18 @@ class svn_source(converter_source):
def revnum(self, rev):
return int(rev.split('@')[-1])
- def latest(self, path, stop=None):
- """Find the latest revid affecting path, up to stop revision
- number. If stop is None, default to repository latest
- revision. It may return a revision in a different module,
- since a branch may be moved without a change being
- reported. Return None if computed module does not belong to
- rootmodule subtree.
+ def latest(self, path, stop=0):
+ """Find the latest revid affecting path, up to stop. It may return
+ a revision in a different module, since a branch may be moved without
+ a change being reported. Return None if computed module does not
+ belong to rootmodule subtree.
"""
- def findchanges(path, start, stop=None):
- stream = self._getlog([path], start, stop or 1)
- try:
- for entry in stream:
- paths, revnum, author, date, message = entry
- if stop is None and paths:
- # We do not know the latest changed revision,
- # keep the first one with changed paths.
- break
- if revnum <= stop:
- break
-
- for p in paths:
- if (not path.startswith(p) or
- not paths[p].copyfrom_path):
- continue
- newpath = paths[p].copyfrom_path + path[len(p):]
- self.ui.debug("branch renamed from %s to %s at %d\n" %
- (path, newpath, revnum))
- path = newpath
- break
- if not paths:
- revnum = None
- return revnum, path
- finally:
- stream.close()
-
if not path.startswith(self.rootmodule):
# Requests on foreign branches may be forbidden at server level
self.ui.debug('ignoring foreign branch %r\n' % path)
return None
- if stop is None:
+ if not stop:
stop = svn.ra.get_latest_revnum(self.ra)
try:
prevmodule = self.reparent('')
@@ -615,30 +568,34 @@ class svn_source(converter_source):
# stat() gives us the previous revision on this line of
# development, but it might be in *another module*. Fetch the
# log and detect renames down to the latest revision.
- revnum, realpath = findchanges(path, stop, dirent.created_rev)
- if revnum is None:
- # Tools like svnsync can create empty revisions when
- # synchronizing only a subtree, for instance. The
- # created_rev of such empty revisions still has its
- # original value despite all changes having disappeared,
- # and it can be returned by ra.stat(), at least when
- # stating the root module. In that case, do not trust
- # created_rev and scan the whole history.
- revnum, realpath = findchanges(path, stop)
- if revnum is None:
- self.ui.debug('ignoring empty branch %r\n' % realpath)
- return None
+ stream = self._getlog([path], stop, dirent.created_rev)
+ try:
+ for entry in stream:
+ paths, revnum, author, date, message = entry
+ if revnum <= dirent.created_rev:
+ break
- if not realpath.startswith(self.rootmodule):
- self.ui.debug('ignoring foreign branch %r\n' % realpath)
+ for p in paths:
+ if not path.startswith(p) or not paths[p].copyfrom_path:
+ continue
+ newpath = paths[p].copyfrom_path + path[len(p):]
+ self.ui.debug("branch renamed from %s to %s at %d\n" %
+ (path, newpath, revnum))
+ path = newpath
+ break
+ finally:
+ stream.close()
+
+ if not path.startswith(self.rootmodule):
+ self.ui.debug('ignoring foreign branch %r\n' % path)
return None
- return self.revid(revnum, realpath)
+ return self.revid(dirent.created_rev, path)
def reparent(self, module):
"""Reparent the svn transport and return the previous parent."""
if self.prevmodule == module:
return module
- svnurl = self.baseurl + quote(module)
+ svnurl = self.baseurl + urllib.quote(module)
prevmodule = self.prevmodule
if prevmodule is None:
prevmodule = ''
@@ -813,7 +770,7 @@ class svn_source(converter_source):
branch = None
cset = commit(author=author,
- date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
+ date=util.datestr(date),
desc=log,
parents=parents,
branch=branch,
@@ -870,14 +827,13 @@ class svn_source(converter_source):
pass
except SubversionException, (inst, num):
if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
- raise util.Abort(_('svn: branch has no revision %s')
- % to_revnum)
+ raise util.Abort(_('svn: branch has no revision %s') % to_revnum)
raise
def getfile(self, file, rev):
# TODO: ra.get_file transmits the whole file instead of diffs.
if file in self.removed:
- raise IOError
+ raise IOError()
mode = ''
try:
new_module, revnum = revsplit(rev)[1:]
@@ -898,7 +854,7 @@ class svn_source(converter_source):
notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
if e.apr_err in notfound: # File not found
- raise IOError
+ raise IOError()
raise
if mode == 'l':
link_prefix = "link "
@@ -910,7 +866,7 @@ class svn_source(converter_source):
"""Enumerate all files in path at revnum, recursively."""
path = path.strip('/')
pool = Pool()
- rpath = '/'.join([self.baseurl, quote(path)]).strip('/')
+ rpath = '/'.join([self.baseurl, urllib.quote(path)]).strip('/')
entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool)
if path:
path += '/'
@@ -958,8 +914,8 @@ class svn_source(converter_source):
if not p.startswith('/'):
p = self.module + '/' + p
relpaths.append(p.strip('/'))
- args = [self.baseurl, relpaths, start, end, limit,
- discover_changed_paths, strict_node_history]
+ args = [self.baseurl, relpaths, start, end, limit, discover_changed_paths,
+ strict_node_history]
arg = encodeargs(args)
hgexe = util.hgexecutable()
cmd = '%s debugsvnlog' % util.shellquote(hgexe)
@@ -1020,25 +976,26 @@ class svn_sink(converter_sink, commandline):
self.wc = None
self.cwd = os.getcwd()
+ path = os.path.realpath(path)
+
created = False
if os.path.isfile(os.path.join(path, '.svn', 'entries')):
- self.wc = os.path.realpath(path)
+ self.wc = path
self.run0('update')
else:
- if not re.search(r'^(file|http|https|svn|svn\+ssh)\://', path):
- path = os.path.realpath(path)
- if os.path.isdir(os.path.dirname(path)):
- if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
- ui.status(_('initializing svn repository %r\n') %
- os.path.basename(path))
- commandline(ui, 'svnadmin').run0('create', path)
- created = path
- path = util.normpath(path)
- if not path.startswith('/'):
- path = '/' + path
- path = 'file://' + path
-
wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
+
+ if os.path.isdir(os.path.dirname(path)):
+ if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
+ ui.status(_('initializing svn repository %r\n') %
+ os.path.basename(path))
+ commandline(ui, 'svnadmin').run0('create', path)
+ created = path
+ path = util.normpath(path)
+ if not path.startswith('/'):
+ path = '/' + path
+ path = 'file://' + path
+
ui.status(_('initializing svn working copy %r\n')
% os.path.basename(wcpath))
self.run0('checkout', path, wcpath)
@@ -1062,29 +1019,6 @@ class svn_sink(converter_sink, commandline):
def wjoin(self, *names):
return os.path.join(self.wc, *names)
- @propertycache
- def manifest(self):
- # As of svn 1.7, the "add" command fails when receiving
- # already tracked entries, so we have to track and filter them
- # ourselves.
- m = set()
- output = self.run0('ls', recursive=True, xml=True)
- doc = xml.dom.minidom.parseString(output)
- for e in doc.getElementsByTagName('entry'):
- for n in e.childNodes:
- if n.nodeType != n.ELEMENT_NODE or n.tagName != 'name':
- continue
- name = ''.join(c.data for c in n.childNodes
- if c.nodeType == c.TEXT_NODE)
- # Entries are compared with names coming from
- # mercurial, so bytes with undefined encoding. Our
- # best bet is to assume they are in local
- # encoding. They will be passed to command line calls
- # later anyway, so they better be.
- m.add(encoding.tolocal(name.encode('utf-8')))
- break
- return m
-
def putfile(self, filename, flags, data):
if 'l' in flags:
self.wopener.symlink(data, filename)
@@ -1097,13 +1031,20 @@ class svn_sink(converter_sink, commandline):
self.wopener.write(filename, data)
if self.is_exec:
- if self.is_exec(self.wjoin(filename)):
- if 'x' not in flags:
- self.delexec.append(filename)
- else:
- if 'x' in flags:
- self.setexec.append(filename)
- util.setflags(self.wjoin(filename), False, 'x' in flags)
+ was_exec = self.is_exec(self.wjoin(filename))
+ else:
+ # On filesystems that do not support the execute bit, the only
+ # way to know whether it is set is to ask Subversion. Setting it
+ # systematically is just as expensive and much simpler.
+ was_exec = 'x' not in flags
+
+ util.setflags(self.wjoin(filename), False, 'x' in flags)
+ if was_exec:
+ if 'x' not in flags:
+ self.delexec.append(filename)
+ else:
+ if 'x' in flags:
+ self.setexec.append(filename)
def _copyfile(self, source, dest):
# SVN's copy command pukes if the destination file exists, but
@@ -1120,7 +1061,6 @@ class svn_sink(converter_sink, commandline):
try:
self.run0('copy', source, dest)
finally:
- self.manifest.add(dest)
if exists:
try:
os.unlink(wdest)
@@ -1139,16 +1079,13 @@ class svn_sink(converter_sink, commandline):
def add_dirs(self, files):
add_dirs = [d for d in sorted(self.dirs_of(files))
- if d not in self.manifest]
+ if not os.path.exists(self.wjoin(d, '.svn', 'entries'))]
if add_dirs:
- self.manifest.update(add_dirs)
self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
return add_dirs
def add_files(self, files):
- files = [f for f in files if f not in self.manifest]
if files:
- self.manifest.update(files)
self.xargs(files, 'add', quiet=True)
return files
@@ -1158,7 +1095,6 @@ class svn_sink(converter_sink, commandline):
wd = self.wjoin(d)
if os.listdir(wd) == '.svn':
self.run0('delete', d)
- self.manifest.remove(d)
deleted.append(d)
return deleted
@@ -1169,12 +1105,6 @@ class svn_sink(converter_sink, commandline):
return u"svn:%s@%s" % (self.uuid, rev)
def putcommit(self, files, copies, parents, commit, source, revmap):
- for parent in parents:
- try:
- return self.revid(self.childmap[parent])
- except KeyError:
- pass
-
# Apply changes to working copy
for f, v in files:
try:
@@ -1187,6 +1117,11 @@ class svn_sink(converter_sink, commandline):
self.copies.append([copies[f], f])
files = [f[0] for f in files]
+ for parent in parents:
+ try:
+ return self.revid(self.childmap[parent])
+ except KeyError:
+ pass
entries = set(self.delete)
files = frozenset(files)
entries.update(self.add_dirs(files.difference(entries)))
@@ -1196,8 +1131,6 @@ class svn_sink(converter_sink, commandline):
self.copies = []
if self.delete:
self.xargs(self.delete, 'delete')
- for f in self.delete:
- self.manifest.remove(f)
self.delete = []
entries.update(self.add_files(files.difference(entries)))
entries.update(self.tidy_dirs(entries))
@@ -1240,12 +1173,3 @@ class svn_sink(converter_sink, commandline):
def puttags(self, tags):
self.ui.warn(_('writing Subversion tags is not yet implemented\n'))
return None, None
-
- def hascommit(self, rev):
- # This is not correct as one can convert to an existing subversion
- # repository and childmap would not list all revisions. Too bad.
- if rev in self.childmap:
- return True
- raise util.Abort(_('splice map revision %s not found in subversion '
- 'child map (revision lookups are not implemented)')
- % rev)
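
The putfile hunk above replaces the branching exec-bit check with a
computed ``was_exec``: when the filesystem cannot report an execute bit,
the code assumes the opposite of the target state so that a property
change is always scheduled, as the comment explains. A standalone sketch
of that decision (the callback arguments stand in for ``util.setflags``
and the sink's bookkeeping lists)::

    def track_exec(filename, flags, is_exec, setflags, setexec, delexec):
        if is_exec:
            was_exec = is_exec(filename)
        else:
            # No execute bit to inspect: assume the opposite of the
            # desired state so a property change is always recorded.
            was_exec = 'x' not in flags
        setflags(filename, False, 'x' in flags)
        if was_exec:
            if 'x' not in flags:
                delexec.append(filename)
        else:
            if 'x' in flags:
                setexec.append(filename)
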
diff --git a/hgext/convert/transport.py b/hgext/convert/transport.py
index 6a8c565..db68ede 100644
--- a/hgext/convert/transport.py
+++ b/hgext/convert/transport.py
@@ -15,9 +15,9 @@
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses/>.
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-from mercurial import util
from svn.core import SubversionException, Pool
import svn.ra
import svn.client
@@ -54,7 +54,7 @@ def _create_auth_baton(pool):
if p:
providers.append(p)
else:
- if util.safehasattr(svn.client, 'get_windows_simple_provider'):
+ if hasattr(svn.client, 'get_windows_simple_provider'):
providers.append(svn.client.get_windows_simple_provider(pool))
return svn.core.svn_auth_open(providers, pool)
@@ -73,7 +73,7 @@ class SvnRaTransport(object):
self.password = ''
# Only Subversion 1.4 has reparent()
- if ra is None or not util.safehasattr(svn.ra, 'reparent'):
+ if ra is None or not hasattr(svn.ra, 'reparent'):
self.client = svn.client.create_context(self.pool)
ab = _create_auth_baton(self.pool)
if False:
@@ -85,7 +85,7 @@ class SvnRaTransport(object):
self.client.config = svn_config
try:
self.ra = svn.client.open_ra_session(
- self.svn_url,
+ self.svn_url.encode('utf8'),
self.client, self.pool)
except SubversionException, (inst, num):
if num in (svn.core.SVN_ERR_RA_ILLEGAL_URL,
diff --git a/hgext/eol.py b/hgext/eol.py
index 951922c..52592fa 100644
--- a/hgext/eol.py
+++ b/hgext/eol.py
@@ -52,10 +52,9 @@ Example versioned ``.hgeol`` file::
The rules will first apply when files are touched in the working
copy, e.g. by updating to null and back to tip to touch all files.
-The extension uses an optional ``[eol]`` section read from both the
-normal Mercurial configuration files and the ``.hgeol`` file, with the
-latter overriding the former. You can use that section to control the
-overall behavior. There are three settings:
+The extension uses an optional ``[eol]`` section in your hgrc file
+(not the ``.hgeol`` file) for settings that control the overall
+behavior. There are two settings:
- ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or
``CRLF`` to override the default interpretation of ``native`` for
@@ -68,10 +67,6 @@ overall behavior. There are three settings:
Such files are normally not touched under the assumption that they
have mixed EOLs on purpose.
-- ``eol.fix-trailing-newline`` (default False) can be set to True to
- ensure that converted files end with a EOL character (either ``\\n``
- or ``\\r\\n`` as per the configured patterns).
-
The extension provides ``cleverencode:`` and ``cleverdecode:`` filters
like the deprecated win32text extension does. This means that you can
disable win32text and enable eol and your filters will still work. You
@@ -94,8 +89,6 @@ from mercurial.i18n import _
from mercurial import util, config, extensions, match, error
import re, os
-testedwith = 'internal'
-
# Matches a lone LF, i.e., one that is not part of CRLF.
singlelf = re.compile('(^|[^\r])\n')
# Matches a single EOL which can either be a CRLF where repeated CR
@@ -113,9 +106,6 @@ def tolf(s, params, ui, **kwargs):
return s
if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
return s
- if (ui.configbool('eol', 'fix-trailing-newline', False)
- and s and s[-1] != '\n'):
- s = s + '\n'
return eolre.sub('\n', s)
def tocrlf(s, params, ui, **kwargs):
@@ -124,9 +114,6 @@ def tocrlf(s, params, ui, **kwargs):
return s
if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
return s
- if (ui.configbool('eol', 'fix-trailing-newline', False)
- and s and s[-1] != '\n'):
- s = s + '\n'
return eolre.sub('\r\n', s)
def isbinary(s, params):
@@ -171,7 +158,7 @@ class eolfile(object):
# about inconsistent newlines.
self.match = match.match(root, '', [], include, exclude)
- def copytoui(self, ui):
+ def setfilters(self, ui):
for pattern, style in self.cfg.items('patterns'):
key = style.upper()
try:
@@ -180,9 +167,6 @@ class eolfile(object):
except KeyError:
ui.warn(_("ignoring unknown EOL style '%s' from %s\n")
% (style, self.cfg.source('patterns', pattern)))
- # eol.only-consistent can be specified in ~/.hgrc or .hgeol
- for k, v in self.cfg.items('eol'):
- ui.setconfig('eol', k, v)
def checkrev(self, repo, ctx, files):
failed = []
@@ -256,6 +240,7 @@ def checkheadshook(ui, repo, node, hooktype, **kwargs):
hook = checkheadshook
def preupdate(ui, repo, hooktype, parent1, parent2):
+ #print "preupdate for %s: %s -> %s" % (repo.root, parent1, parent2)
repo.loadeol([parent1])
return False
@@ -273,6 +258,7 @@ def extsetup(ui):
def reposetup(ui, repo):
uisetup(repo.ui)
+ #print "reposetup for", repo.root
if not repo.local():
return
@@ -287,7 +273,7 @@ def reposetup(ui, repo):
eol = parseeol(self.ui, self, nodes)
if eol is None:
return None
- eol.copytoui(self.ui)
+ eol.setfilters(self.ui)
return eol.match
def _hgcleardirstate(self):
@@ -317,7 +303,7 @@ def reposetup(ui, repo):
# again since the new .hgeol file might no
# longer match a file it matched before
self.dirstate.normallookup(f)
- # Create or touch the cache to update mtime
+ # Touch the cache to update mtime.
self.opener("eol.cache", "w").close()
wlock.release()
except error.LockUnavailable:
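
For reference, the two patterns mentioned above drive these filters:
``eolre`` rewrites any EOL, while ``singlelf`` spots a bare LF that is
not part of a CRLF pair and so signals inconsistent endings. A sketch of
the ``tolf`` path with the binary and only-consistent guards reduced to
their essence (the real filters also consult ``ui`` configuration)::

    import re

    eolre = re.compile('\r*\n')
    singlelf = re.compile('(^|[^\r])\n')

    def tolf(s):
        if '\0' in s:                        # looks binary: leave alone
            return s
        if '\r\n' in s and singlelf.search(s):
            return s                         # mixed EOLs: leave alone
        return eolre.sub('\n', s)

    print repr(tolf('a\r\nb\r\n'))   # 'a\nb\n'
    print repr(tolf('a\r\nb\n'))     # unchanged: endings inconsistent
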
diff --git a/hgext/extdiff.py b/hgext/extdiff.py
index bae60e8..8c2519e 100644
--- a/hgext/extdiff.py
+++ b/hgext/extdiff.py
@@ -33,8 +33,7 @@ you do not need to type :hg:`extdiff -p kdiff3` always. ::
# (see http://www.vim.org/scripts/script.php?script_id=102) Non
# English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
# your .vimrc
- vimdiff = gvim -f "+next" \\
- "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
+ vimdiff = gvim -f '+next' '+execute "DirDiff" argv(0) argv(1)'
Tool arguments can include variables that are expanded at runtime::
@@ -66,8 +65,6 @@ from mercurial.node import short, nullid
from mercurial import scmutil, scmutil, util, commands, encoding
import os, shlex, shutil, tempfile, re
-testedwith = 'internal'
-
def snapshot(ui, repo, files, node, tmproot):
'''snapshot files as of some revision
if not using snapshot, -I/-X does not work and recursive diff
@@ -90,7 +87,7 @@ def snapshot(ui, repo, files, node, tmproot):
ctx = repo[node]
for fn in files:
wfn = util.pconvert(fn)
- if wfn not in ctx:
+ if not wfn in ctx:
# File doesn't exist; could be a bogus modify
continue
ui.note(' %s\n' % wfn)
diff --git a/hgext/factotum.py b/hgext/factotum.py
deleted file mode 100644
index 098c5a2..0000000
--- a/hgext/factotum.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# factotum.py - Plan 9 factotum integration for Mercurial
-#
-# Copyright (C) 2012 Steven Stallion <sstallion@gmail.com>
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the
-# Free Software Foundation; either version 2 of the License, or (at your
-# option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
-# Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-'''http authentication with factotum
-
-This extension allows the factotum(4) facility on Plan 9 from Bell Labs
-platforms to provide authentication information for HTTP access. Configuration
-entries specified in the auth section as well as authentication information
-provided in the repository URL are fully supported. If no prefix is specified,
-a value of "*" will be assumed.
-
-By default, keys are specified as::
-
- proto=pass service=hg prefix=<prefix> user=<username> !password=<password>
-
-If the factotum extension is unable to read the required key, one will be
-requested interactively.
-
-A configuration section is available to customize runtime behavior. By
-default, these entries are::
-
- [factotum]
- executable = /bin/auth/factotum
- mountpoint = /mnt/factotum
- service = hg
-
-The executable entry defines the full path to the factotum binary. The
-mountpoint entry defines the path to the factotum file service. Lastly, the
-service entry controls the service name used when reading keys.
-
-'''
-
-from mercurial.i18n import _
-from mercurial.url import passwordmgr
-from mercurial import httpconnection, urllib2, util
-import os
-
-ERRMAX = 128
-
-def auth_getkey(self, params):
- if not self.ui.interactive():
- raise util.Abort(_('factotum not interactive'))
- if 'user=' not in params:
- params = '%s user?' % params
- params = '%s !password?' % params
- os.system("%s -g '%s'" % (_executable, params))
-
-def auth_getuserpasswd(self, getkey, params):
- params = 'proto=pass %s' % params
- while True:
- fd = os.open('%s/rpc' % _mountpoint, os.O_RDWR)
- try:
- try:
- os.write(fd, 'start %s' % params)
- l = os.read(fd, ERRMAX).split()
- if l[0] == 'ok':
- os.write(fd, 'read')
- l = os.read(fd, ERRMAX).split()
- if l[0] == 'ok':
- return l[1:]
- except (OSError, IOError):
- raise util.Abort(_('factotum not responding'))
- finally:
- os.close(fd)
- getkey(self, params)
-
-def monkeypatch_method(cls):
- def decorator(func):
- setattr(cls, func.__name__, func)
- return func
- return decorator
-
-@monkeypatch_method(passwordmgr)
-def find_user_password(self, realm, authuri):
- user, passwd = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
- self, realm, authuri)
- if user and passwd:
- self._writedebug(user, passwd)
- return (user, passwd)
-
- prefix = ''
- res = httpconnection.readauthforuri(self.ui, authuri, user)
- if res:
- _, auth = res
- prefix = auth.get('prefix')
- user, passwd = auth.get('username'), auth.get('password')
- if not user or not passwd:
- if not prefix:
- prefix = '*'
- params = 'service=%s prefix=%s' % (_service, prefix)
- if user:
- params = '%s user=%s' % (params, user)
- user, passwd = auth_getuserpasswd(self, auth_getkey, params)
-
- self.add_password(realm, authuri, user, passwd)
- self._writedebug(user, passwd)
- return (user, passwd)
-
-def uisetup(ui):
- global _executable
- _executable = ui.config('factotum', 'executable', '/bin/auth/factotum')
- global _mountpoint
- _mountpoint = ui.config('factotum', 'mountpoint', '/mnt/factotum')
- global _service
- _service = ui.config('factotum', 'service', 'hg')
diff --git a/hgext/fetch.py b/hgext/fetch.py
index 491d8b2..23061cd 100644
--- a/hgext/fetch.py
+++ b/hgext/fetch.py
@@ -5,15 +5,13 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
-'''pull, update and merge in one command (DEPRECATED)'''
+'''pull, update and merge in one command'''
from mercurial.i18n import _
from mercurial.node import nullid, short
from mercurial import commands, cmdutil, hg, util, error
from mercurial.lock import release
-testedwith = 'internal'
-
def fetch(ui, repo, source='default', **opts):
'''pull changes from a remote repository, merge new changes if needed.
@@ -25,9 +23,10 @@ def fetch(ui, repo, source='default', **opts):
Otherwise, the working directory is updated to include the new
changes.
- When a merge is needed, the working directory is first updated to
- the newly pulled changes. Local changes are then merged into the
- pulled changes. To switch the merge order, use --switch-parent.
+ When a merge occurs, the newly pulled changes are assumed to be
+ "authoritative". The head of the new changes is used as the first
+ parent, with local changes as the second. To switch the merge
+ order, use --switch-parent.
See :hg:`help dates` for a list of formats valid for -d/--date.
@@ -40,10 +39,7 @@ def fetch(ui, repo, source='default', **opts):
parent, p2 = repo.dirstate.parents()
branch = repo.dirstate.branch()
- try:
- branchnode = repo.branchtip(branch)
- except error.RepoLookupError:
- branchnode = None
+ branchnode = repo.branchtags().get(branch)
if parent != branchnode:
raise util.Abort(_('working dir not at branch tip '
'(use "hg update" to check out branch tip)'))
@@ -75,7 +71,7 @@ def fetch(ui, repo, source='default', **opts):
try:
revs = [other.lookup(rev) for rev in opts['rev']]
except error.CapabilityError:
- err = _("other repository doesn't support revision lookup, "
+ err = _("Other repository doesn't support revision lookup, "
"so a rev cannot be specified.")
raise util.Abort(err)
@@ -87,9 +83,9 @@ def fetch(ui, repo, source='default', **opts):
# Is this a simple fast-forward along the current branch?
newheads = repo.branchheads(branch)
newchildren = repo.changelog.nodesbetween([parent], newheads)[2]
- if len(newheads) == 1 and len(newchildren):
+ if len(newheads) == 1:
if newchildren[0] != parent:
- return hg.update(repo, newchildren[0])
+ return hg.clean(repo, newchildren[0])
else:
return 0
@@ -106,9 +102,6 @@ def fetch(ui, repo, source='default', **opts):
(len(newheads) - 1))
return 1
- if not newheads:
- return 0
-
# Otherwise, let's merge.
err = False
if newheads:
diff --git a/hgext/gpg.py b/hgext/gpg.py
index 2ded54c..3ffd836 100644
--- a/hgext/gpg.py
+++ b/hgext/gpg.py
@@ -12,7 +12,6 @@ from mercurial.i18n import _
cmdtable = {}
command = cmdutil.command(cmdtable)
-testedwith = 'internal'
class gpg(object):
def __init__(self, path, key=None):
@@ -44,7 +43,7 @@ class gpg(object):
try:
if f:
os.unlink(f)
- except OSError:
+ except:
pass
keys = []
key, fingerprint = None, None
@@ -164,7 +163,7 @@ def sigs(ui, repo):
r = "%5d:%s" % (rev, hgnode.hex(repo.changelog.node(rev)))
ui.write("%-30s %s\n" % (keystr(ui, k), r))
-@command("sigcheck", [], _('hg sigcheck REV'))
+@command("sigcheck", [], _('hg sigcheck REVISION'))
def check(ui, repo, rev):
"""verify all the signatures there may be for a particular revision"""
mygpg = newgpg(ui)
@@ -180,7 +179,7 @@ def check(ui, repo, rev):
keys.extend(k)
if not keys:
- ui.write(_("no valid signature for %s\n") % hgnode.short(rev))
+ ui.write(_("No valid signature for %s\n") % hgnode.short(rev))
return
# print summary
@@ -206,7 +205,7 @@ def keystr(ui, key):
('m', 'message', '',
_('commit message'), _('TEXT')),
] + commands.commitopts2,
- _('hg sign [OPTION]... [REV]...'))
+ _('hg sign [OPTION]... [REVISION]...'))
def sign(ui, repo, *revs, **opts):
"""add a signature for the current or given revision
@@ -237,7 +236,7 @@ def sign(ui, repo, *revs, **opts):
for n in nodes:
hexnode = hgnode.hex(n)
- ui.write(_("signing %d:%s\n") % (repo.changelog.rev(n),
+ ui.write(_("Signing %d:%s\n") % (repo.changelog.rev(n),
hgnode.short(n)))
# build data
data = node2txt(repo, n, sigver)
@@ -287,3 +286,4 @@ def node2txt(repo, node, ver):
return "%s\n" % hgnode.hex(node)
else:
raise util.Abort(_("unknown signature version"))
+
diff --git a/hgext/graphlog.py b/hgext/graphlog.py
index 9caed24..27366c1 100644
--- a/hgext/graphlog.py
+++ b/hgext/graphlog.py
@@ -12,36 +12,308 @@ commands. When this option is given, an ASCII representation of the
revision graph is also shown.
'''
+from mercurial.cmdutil import show_changeset
+from mercurial.commands import templateopts
from mercurial.i18n import _
-from mercurial import cmdutil, commands
+from mercurial.node import nullrev
+from mercurial import cmdutil, commands, extensions, scmutil
+from mercurial import hg, util, graphmod
cmdtable = {}
command = cmdutil.command(cmdtable)
-testedwith = 'internal'
+
+ASCIIDATA = 'ASC'
+
+def asciiedges(type, char, lines, seen, rev, parents):
+ """adds edge info to changelog DAG walk suitable for ascii()"""
+ if rev not in seen:
+ seen.append(rev)
+ nodeidx = seen.index(rev)
+
+ knownparents = []
+ newparents = []
+ for parent in parents:
+ if parent in seen:
+ knownparents.append(parent)
+ else:
+ newparents.append(parent)
+
+ ncols = len(seen)
+ nextseen = seen[:]
+ nextseen[nodeidx:nodeidx + 1] = newparents
+ edges = [(nodeidx, nextseen.index(p)) for p in knownparents]
+
+ while len(newparents) > 2:
+ # ascii() only knows how to add or remove a single column between two
+ # calls. Nodes with more than two parents break this constraint so we
+ # introduce intermediate expansion lines to grow the active node list
+ # slowly.
+ edges.append((nodeidx, nodeidx))
+ edges.append((nodeidx, nodeidx + 1))
+ nmorecols = 1
+ yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
+ char = '\\'
+ lines = []
+ nodeidx += 1
+ ncols += 1
+ edges = []
+ del newparents[0]
+
+ if len(newparents) > 0:
+ edges.append((nodeidx, nodeidx))
+ if len(newparents) > 1:
+ edges.append((nodeidx, nodeidx + 1))
+ nmorecols = len(nextseen) - ncols
+ seen[:] = nextseen
+ yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
+
+def fix_long_right_edges(edges):
+ for (i, (start, end)) in enumerate(edges):
+ if end > start:
+ edges[i] = (start, end + 1)
+
+def get_nodeline_edges_tail(
+ node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
+ if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
+ # Still going in the same non-vertical direction.
+ if n_columns_diff == -1:
+ start = max(node_index + 1, p_node_index)
+ tail = ["|", " "] * (start - node_index - 1)
+ tail.extend(["/", " "] * (n_columns - start))
+ return tail
+ else:
+ return ["\\", " "] * (n_columns - node_index - 1)
+ else:
+ return ["|", " "] * (n_columns - node_index - 1)
+
+def draw_edges(edges, nodeline, interline):
+ for (start, end) in edges:
+ if start == end + 1:
+ interline[2 * end + 1] = "/"
+ elif start == end - 1:
+ interline[2 * start + 1] = "\\"
+ elif start == end:
+ interline[2 * start] = "|"
+ else:
+ if 2 * end >= len(nodeline):
+ continue
+ nodeline[2 * end] = "+"
+ if start > end:
+ (start, end) = (end, start)
+ for i in range(2 * start + 1, 2 * end):
+ if nodeline[i] != "+":
+ nodeline[i] = "-"
+
+def get_padding_line(ni, n_columns, edges):
+ line = []
+ line.extend(["|", " "] * ni)
+ if (ni, ni - 1) in edges or (ni, ni) in edges:
+ # (ni, ni - 1) (ni, ni)
+ # | | | | | | | |
+ # +---o | | o---+
+ # | | c | | c | |
+ # | |/ / | |/ /
+ # | | | | | |
+ c = "|"
+ else:
+ c = " "
+ line.extend([c, " "])
+ line.extend(["|", " "] * (n_columns - ni - 1))
+ return line
+
+def asciistate():
+ """returns the initial value for the "state" argument to ascii()"""
+ return [0, 0]
+
+def ascii(ui, state, type, char, text, coldata):
+ """prints an ASCII graph of the DAG
+
+ takes the following arguments (one call per node in the graph):
+
+ - ui to write to
+ - Somewhere to keep the needed state in (init to asciistate())
+ - Column of the current node in the set of ongoing edges.
+ - Type indicator of node data == ASCIIDATA.
+ - Payload: (char, lines):
+ - Character to use as node's symbol.
+ - List of lines to display as the node's text.
+ - Edges; a list of (col, next_col) indicating the edges between
+ the current node and its parents.
+ - Number of columns (ongoing edges) in the current revision.
+ - The difference between the number of columns (ongoing edges)
+ in the next revision and the number of columns (ongoing edges)
+ in the current revision. That is: -1 means one column removed;
+ 0 means no columns added or removed; 1 means one column added.
+ """
+
+ idx, edges, ncols, coldiff = coldata
+ assert -2 < coldiff < 2
+ if coldiff == -1:
+ # Transform
+ #
+ # | | | | | |
+ # o | | into o---+
+ # |X / |/ /
+ # | | | |
+ fix_long_right_edges(edges)
+
+ # add_padding_line says whether to rewrite
+ #
+ # | | | | | | | |
+ # | o---+ into | o---+
+ # | / / | | | # <--- padding line
+ # o | | | / /
+ # o | |
+ add_padding_line = (len(text) > 2 and coldiff == -1 and
+ [x for (x, y) in edges if x + 1 < y])
+
+ # fix_nodeline_tail says whether to rewrite
+ #
+ # | | o | | | | o | |
+ # | | |/ / | | |/ /
+ # | o | | into | o / / # <--- fixed nodeline tail
+ # | |/ / | |/ /
+ # o | | o | |
+ fix_nodeline_tail = len(text) <= 2 and not add_padding_line
+
+ # nodeline is the line containing the node character (typically o)
+ nodeline = ["|", " "] * idx
+ nodeline.extend([char, " "])
+
+ nodeline.extend(
+ get_nodeline_edges_tail(idx, state[1], ncols, coldiff,
+ state[0], fix_nodeline_tail))
+
+ # shift_interline is the line containing the non-vertical
+ # edges between this entry and the next
+ shift_interline = ["|", " "] * idx
+ if coldiff == -1:
+ n_spaces = 1
+ edge_ch = "/"
+ elif coldiff == 0:
+ n_spaces = 2
+ edge_ch = "|"
+ else:
+ n_spaces = 3
+ edge_ch = "\\"
+ shift_interline.extend(n_spaces * [" "])
+ shift_interline.extend([edge_ch, " "] * (ncols - idx - 1))
+
+ # draw edges from the current node to its parents
+ draw_edges(edges, nodeline, shift_interline)
+
+ # lines is the list of all graph lines to print
+ lines = [nodeline]
+ if add_padding_line:
+ lines.append(get_padding_line(idx, ncols, edges))
+ lines.append(shift_interline)
+
+ # make sure that there are as many graph lines as there are
+ # log strings
+ while len(text) < len(lines):
+ text.append("")
+ if len(lines) < len(text):
+ extra_interline = ["|", " "] * (ncols + coldiff)
+ while len(lines) < len(text):
+ lines.append(extra_interline)
+
+ # print lines
+ indentation_level = max(ncols, ncols + coldiff)
+ for (line, logstr) in zip(lines, text):
+ ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
+ ui.write(ln.rstrip() + '\n')
+
+ # ... and start over
+ state[0] = coldiff
+ state[1] = idx
+
+def get_revs(repo, rev_opt):
+ if rev_opt:
+ revs = scmutil.revrange(repo, rev_opt)
+ if len(revs) == 0:
+ return (nullrev, nullrev)
+ return (max(revs), min(revs))
+ else:
+ return (len(repo) - 1, 0)
+
+def check_unsupported_flags(pats, opts):
+ for op in ["follow_first", "copies", "newest_first"]:
+ if op in opts and opts[op]:
+ raise util.Abort(_("-G/--graph option is incompatible with --%s")
+ % op.replace("_", "-"))
+ if pats and opts.get('follow'):
+ raise util.Abort(_("-G/--graph option is incompatible with --follow "
+ "with file argument"))
+
+def revset(pats, opts):
+ """Return revset str built of revisions, log options and file patterns.
+ """
+ opt2revset = {
+ 'follow': (0, 'follow()'),
+ 'no_merges': (0, 'not merge()'),
+ 'only_merges': (0, 'merge()'),
+ 'removed': (0, 'removes("*")'),
+ 'date': (1, 'date($)'),
+ 'branch': (2, 'branch($)'),
+ 'exclude': (2, 'not file($)'),
+ 'include': (2, 'file($)'),
+ 'keyword': (2, 'keyword($)'),
+ 'only_branch': (2, 'branch($)'),
+ 'prune': (2, 'not ($ or ancestors($))'),
+ 'user': (2, 'user($)'),
+ }
+ optrevset = []
+ revset = []
+ for op, val in opts.iteritems():
+ if not val:
+ continue
+ if op == 'rev':
+ # Already a revset
+ revset.extend(val)
+ if op not in opt2revset:
+ continue
+ arity, revop = opt2revset[op]
+ revop = revop.replace('$', '%(val)r')
+ if arity == 0:
+ optrevset.append(revop)
+ elif arity == 1:
+ optrevset.append(revop % {'val': val})
+ else:
+ for f in val:
+ optrevset.append(revop % {'val': f})
+
+ for path in pats:
+ optrevset.append('file(%r)' % path)
+
+ if revset or optrevset:
+ if revset:
+ revset = ['(' + ' or '.join(revset) + ')']
+ if optrevset:
+ revset.append('(' + ' and '.join(optrevset) + ')')
+ revset = ' and '.join(revset)
+ else:
+ revset = 'all()'
+ return revset
+
+def generate(ui, dag, displayer, showparents, edgefn):
+ seen, state = [], asciistate()
+ for rev, type, ctx, parents in dag:
+ char = ctx.node() in showparents and '@' or 'o'
+ displayer.show(ctx)
+ lines = displayer.hunk.pop(rev).split('\n')[:-1]
+ displayer.flush(rev)
+ edges = edgefn(type, char, lines, seen, rev, parents)
+ for type, char, lines, coldata in edges:
+ ascii(ui, state, type, char, lines, coldata)
+ displayer.close()
@command('glog',
- [('f', 'follow', None,
- _('follow changeset history, or file history across copies and renames')),
- ('', 'follow-first', None,
- _('only follow the first parent of merge changesets (DEPRECATED)')),
- ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
- ('C', 'copies', None, _('show copied files')),
- ('k', 'keyword', [],
- _('do case-insensitive search for a given text'), _('TEXT')),
+ [('l', 'limit', '',
+ _('limit number of changes displayed'), _('NUM')),
+ ('p', 'patch', False, _('show patch')),
('r', 'rev', [], _('show the specified revision or range'), _('REV')),
- ('', 'removed', None, _('include revisions where files were removed')),
- ('m', 'only-merges', None, _('show only merges (DEPRECATED)')),
- ('u', 'user', [], _('revisions committed by user'), _('USER')),
- ('', 'only-branch', [],
- _('show only changesets within the given named branch (DEPRECATED)'),
- _('BRANCH')),
- ('b', 'branch', [],
- _('show changesets within the given named branch'), _('BRANCH')),
- ('P', 'prune', [],
- _('do not display revision or any of its ancestors'), _('REV')),
- ('', 'hidden', False, _('show hidden changesets (DEPRECATED)')),
- ] + commands.logopts + commands.walkopts,
- _('[OPTION]... [FILE]'))
+ ] + templateopts,
+ _('hg glog [OPTION]... [FILE]'))
def graphlog(ui, repo, *pats, **opts):
"""show revision history alongside an ASCII revision graph
@@ -51,4 +323,77 @@ def graphlog(ui, repo, *pats, **opts):
Nodes printed as an @ character are parents of the working
directory.
"""
- return cmdutil.graphlog(ui, repo, *pats, **opts)
+
+ check_unsupported_flags(pats, opts)
+
+ revs = sorted(scmutil.revrange(repo, [revset(pats, opts)]), reverse=1)
+ limit = cmdutil.loglimit(opts)
+ if limit is not None:
+ revs = revs[:limit]
+ revdag = graphmod.dagwalker(repo, revs)
+
+ displayer = show_changeset(ui, repo, opts, buffered=True)
+ showparents = [ctx.node() for ctx in repo[None].parents()]
+ generate(ui, revdag, displayer, showparents, asciiedges)
+
+def graphrevs(repo, nodes, opts):
+ limit = cmdutil.loglimit(opts)
+ nodes.reverse()
+ if limit is not None:
+ nodes = nodes[:limit]
+ return graphmod.nodes(repo, nodes)
+
+def goutgoing(ui, repo, dest=None, **opts):
+ """show the outgoing changesets alongside an ASCII revision graph
+
+ Print the outgoing changesets alongside a revision graph drawn with
+ ASCII characters.
+
+ Nodes printed as an @ character are parents of the working
+ directory.
+ """
+
+ check_unsupported_flags([], opts)
+ o = hg._outgoing(ui, repo, dest, opts)
+ if o is None:
+ return
+
+ revdag = graphrevs(repo, o, opts)
+ displayer = show_changeset(ui, repo, opts, buffered=True)
+ showparents = [ctx.node() for ctx in repo[None].parents()]
+ generate(ui, revdag, displayer, showparents, asciiedges)
+
+def gincoming(ui, repo, source="default", **opts):
+ """show the incoming changesets alongside an ASCII revision graph
+
+ Print the incoming changesets alongside a revision graph drawn with
+ ASCII characters.
+
+ Nodes printed as an @ character are parents of the working
+ directory.
+ """
+ def subreporecurse():
+ return 1
+
+ check_unsupported_flags([], opts)
+ def display(other, chlist, displayer):
+ revdag = graphrevs(other, chlist, opts)
+ showparents = [ctx.node() for ctx in repo[None].parents()]
+ generate(ui, revdag, displayer, showparents, asciiedges)
+
+ hg._incoming(display, subreporecurse, ui, repo, source, opts, buffered=True)
+
+def uisetup(ui):
+ '''Initialize the extension.'''
+ _wrapcmd('log', commands.table, graphlog)
+ _wrapcmd('incoming', commands.table, gincoming)
+ _wrapcmd('outgoing', commands.table, goutgoing)
+
+def _wrapcmd(cmd, table, wrapfn):
+ '''wrap the command'''
+ def graph(orig, *args, **kwargs):
+ if kwargs['graph']:
+ return wrapfn(*args, **kwargs)
+ return orig(*args, **kwargs)
+ entry = extensions.wrapcommand(table, cmd, graph)
+ entry[1].append(('G', 'graph', None, _("show the revision DAG")))
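
The revset() helper above turns each active log option into a revset
predicate according to its arity: arity 0 appends a fixed predicate,
arity 1 substitutes the single value, and arity 2 repeats the predicate
for each element of a list value, with the pieces joined by ``and``. A
hypothetical reduction of that dispatch, just to show the three shapes
(option values are illustrative)::

    def one_predicate(val, arity, template):
        # template uses '$' as the value placeholder, as in opt2revset
        if arity == 0:
            return [template]
        if arity == 1:
            return [template.replace('$', '%r') % val]
        return [template.replace('$', '%r') % f for f in val]

    print one_predicate(True, 0, 'not merge()')      # ['not merge()']
    print one_predicate('2012-01', 1, 'date($)')     # ["date('2012-01')"]
    print one_predicate(['alice', 'bob'], 2, 'user($)')
    # ["user('alice')", "user('bob')"]

So ``--user alice --no-merges`` ends up as
``(user('alice') and not merge())``, up to dict iteration order.
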
diff --git a/hgext/hgcia.py b/hgext/hgcia.py
index 075840a..6a3ea66 100644
--- a/hgext/hgcia.py
+++ b/hgext/hgcia.py
@@ -46,15 +46,17 @@ from mercurial.node import bin, short
from mercurial import cmdutil, patch, templater, util, mail
import email.Parser
-import socket, xmlrpclib
+import xmlrpclib
from xml.sax import saxutils
-testedwith = 'internal'
socket_timeout = 30 # seconds
-if util.safehasattr(socket, 'setdefaulttimeout'):
+try:
# set a timeout for the socket so you don't have to wait so looooong
# when cia.vc is having problems. requires python >= 2.3:
+ import socket
socket.setdefaulttimeout(socket_timeout)
+except:
+ pass
HGCIA_VERSION = '0.1'
HGCIA_URL = 'http://hg.kublai.com/mercurial/hgcia'
@@ -111,7 +113,7 @@ class ciamsg(object):
# diffstat is stupid
self.name = 'cia'
def write(self, data):
- self.lines += data.splitlines(True)
+ self.lines.append(data)
def close(self):
pass
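
The write() hunk above changes how this fake diffstat file buffers
output: ``splitlines(True)`` keeps one list element per physical line no
matter how callers chunk their writes, while a plain ``append`` preserves
the caller's chunking. A two-line illustration of the difference::

    data = 'first line\nsecond line\n'

    bylines = []
    bylines.extend(data.splitlines(True))   # ['first line\n', 'second line\n']

    bychunk = []
    bychunk.append(data)                     # one element holding both lines

    print len(bylines), len(bychunk)         # 2 1
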
diff --git a/hgext/hgk.py b/hgext/hgk.py
index 304b910..e57d1aa 100644
--- a/hgext/hgk.py
+++ b/hgext/hgk.py
@@ -39,8 +39,6 @@ from mercurial import commands, util, patch, revlog, scmutil
from mercurial.node import nullid, nullrev, short
from mercurial.i18n import _
-testedwith = 'internal'
-
def difftree(ui, repo, node1=None, node2=None, *files, **opts):
"""diff trees from two commits"""
def __difftree(repo, node1, node2, files=[]):
@@ -97,8 +95,7 @@ def catcommit(ui, repo, n, prefix, ctx=None):
nlprefix = '\n' + prefix
if ctx is None:
ctx = repo[n]
- # use ctx.node() instead ??
- ui.write("tree %s\n" % short(ctx.changeset()[0]))
+ ui.write("tree %s\n" % short(ctx.changeset()[0])) # use ctx.node() instead ??
for p in ctx.parents():
ui.write("parent %s\n" % p)
@@ -116,8 +113,7 @@ def catcommit(ui, repo, n, prefix, ctx=None):
ui.write("branch %s\n\n" % ctx.branch())
if prefix != "":
- ui.write("%s%s\n" % (prefix,
- description.replace('\n', nlprefix).strip()))
+ ui.write("%s%s\n" % (prefix, description.replace('\n', nlprefix).strip()))
else:
ui.write(description + "\n")
if prefix:
diff --git a/hgext/highlight/__init__.py b/hgext/highlight/__init__.py
index fc47815..55e3c18 100644
--- a/hgext/highlight/__init__.py
+++ b/hgext/highlight/__init__.py
@@ -24,7 +24,6 @@ The default is 'colorful'.
import highlight
from mercurial.hgweb import webcommands, webutil, common
from mercurial import extensions, encoding
-testedwith = 'internal'
def filerevision_highlight(orig, web, tmpl, fctx):
mt = ''.join(tmpl('mimetype', encoding=encoding.encoding))
@@ -52,13 +51,11 @@ def generate_css(web, req, tmpl):
pg_style = web.config('web', 'pygments_style', 'colorful')
fmter = highlight.HtmlFormatter(style = pg_style)
req.respond(common.HTTP_OK, 'text/css')
- return ['/* pygments_style = %s */\n\n' % pg_style,
- fmter.get_style_defs('')]
+ return ['/* pygments_style = %s */\n\n' % pg_style, fmter.get_style_defs('')]
def extsetup():
# monkeypatch in the new version
- extensions.wrapfunction(webcommands, '_filerevision',
- filerevision_highlight)
+ extensions.wrapfunction(webcommands, '_filerevision', filerevision_highlight)
extensions.wrapfunction(webcommands, 'annotate', annotate_highlight)
webcommands.highlightcss = generate_css
webcommands.__all__.append('highlightcss')
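
Outside of hgweb, the same stylesheet can be produced directly with
Pygments; a minimal sketch, using an empty selector prefix to match the
``get_style_defs('')`` call above::

    from pygments.formatters import HtmlFormatter

    pg_style = 'colorful'  # the extension's default style
    fmter = HtmlFormatter(style=pg_style)
    print('/* pygments_style = %s */\n\n' % pg_style
          + fmter.get_style_defs(''))
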
diff --git a/hgext/histedit.py b/hgext/histedit.py
deleted file mode 100644
index 88e0e93..0000000
--- a/hgext/histedit.py
+++ /dev/null
@@ -1,715 +0,0 @@
-# histedit.py - interactive history editing for mercurial
-#
-# Copyright 2009 Augie Fackler <raf@durin42.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-"""interactive history editing
-
-With this extension installed, Mercurial gains one new command: histedit. Usage
-is as follows, assuming the following history::
-
- @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
- | Add delta
- |
- o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
- | Add gamma
- |
- o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
- | Add beta
- |
- o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
- Add alpha
-
-If you were to run ``hg histedit c561b4e977df``, you would see the following
-file open in your editor::
-
- pick c561b4e977df Add beta
- pick 030b686bedc4 Add gamma
- pick 7c2fd3b9020c Add delta
-
- # Edit history between 633536316234 and 7c2fd3b9020c
- #
- # Commands:
- # p, pick = use commit
- # e, edit = use commit, but stop for amending
- # f, fold = use commit, but fold into previous commit
- # d, drop = remove commit from history
- # m, mess = edit message without changing commit content
- #
-
-In this file, lines beginning with ``#`` are ignored. You must specify a rule
-for each revision in your history. For example, if you had meant to add gamma
-before beta, and then wanted to add delta in the same revision as beta, you
-would reorganize the file to look like this::
-
- pick 030b686bedc4 Add gamma
- pick c561b4e977df Add beta
- fold 7c2fd3b9020c Add delta
-
- # Edit history between 633536316234 and 7c2fd3b9020c
- #
- # Commands:
- # p, pick = use commit
- # e, edit = use commit, but stop for amending
- # f, fold = use commit, but fold into previous commit
- # d, drop = remove commit from history
- # m, mess = edit message without changing commit content
- #
-
-Once you close the editor, ``histedit`` starts working. When you
-specify a ``fold`` operation, ``histedit`` will open an editor when it folds
-those revisions together, offering you a chance to clean up the commit message::
-
- Add beta
- ***
- Add delta
-
-Edit the commit message to your liking, then close the editor. For
-this example, let's assume that the commit message was changed to
-``Add beta and delta.`` After histedit has run and had a chance to
-remove any old or temporary revisions it needed, the history looks
-like this::
-
- @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
- | Add beta and delta.
- |
- o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
- | Add gamma
- |
- o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
- Add alpha
-
-Note that ``histedit`` does *not* remove any revisions (even its own temporary
-ones) until after it has completed all the editing operations, so it will
-probably perform several strip operations when it's done. For the above example,
-it had to run strip twice. Strip can be slow depending on a variety of factors,
-so you might need to be a little patient. You can choose to keep the original
-revisions by passing the ``--keep`` flag.
-
-The ``edit`` operation will drop you back to a command prompt,
-allowing you to edit files freely, or even use ``hg record`` to commit
-some changes as a separate commit. When you're done, any remaining
-uncommitted changes will be committed as well. At that point, run ``hg
-histedit --continue`` to finish this step. You'll be prompted for a
-new commit message, but the default commit message will be the
-original message for the edited revision.
-
-The ``message`` operation will give you a chance to revise a commit
-message without changing the contents. It's a shortcut for doing
-``edit`` immediately followed by ``hg histedit --continue``.
-
-If ``histedit`` encounters a conflict when moving a revision (while
-handling ``pick`` or ``fold``), it'll stop in a similar manner to
-``edit`` with the difference that it won't prompt you for a commit
-message when done. If you decide at this point that you don't like how
-much work it will be to rearrange history, or that you made a mistake,
-you can use ``hg histedit --abort`` to abandon the new changes you
-have made and return to the state before you attempted to edit your
-history.
-
-If we clone the example repository above and add three more changes, such that
-we have the following history::
-
- @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
- | Add theta
- |
- o 5 140988835471 2009-04-27 18:04 -0500 stefan
- | Add eta
- |
- o 4 122930637314 2009-04-27 18:04 -0500 stefan
- | Add zeta
- |
- o 3 836302820282 2009-04-27 18:04 -0500 stefan
- | Add epsilon
- |
- o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
- | Add beta and delta.
- |
- o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
- | Add gamma
- |
- o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
- Add alpha
-
-If you run ``hg histedit --outgoing`` on the clone then it is the same
-as running ``hg histedit 836302820282``. If you plan to push to a
-repository that Mercurial does not detect to be related to the source
-repo, you can add a ``--force`` option.
-"""
-
-try:
- import cPickle as pickle
-except ImportError:
- import pickle
-import tempfile
-import os
-
-from mercurial import bookmarks
-from mercurial import cmdutil
-from mercurial import discovery
-from mercurial import error
-from mercurial import hg
-from mercurial import lock as lockmod
-from mercurial import node
-from mercurial import patch
-from mercurial import repair
-from mercurial import scmutil
-from mercurial import util
-from mercurial.i18n import _
-
-cmdtable = {}
-command = cmdutil.command(cmdtable)
-
-testedwith = 'internal'
-
-editcomment = _("""# Edit history between %s and %s
-#
-# Commands:
-# p, pick = use commit
-# e, edit = use commit, but stop for amending
-# f, fold = use commit, but fold into previous commit (combines N and N-1)
-# d, drop = remove commit from history
-# m, mess = edit message without changing commit content
-#
-""")
-
-def between(repo, old, new, keep):
- revs = [old]
- current = old
- while current != new:
- ctx = repo[current]
- if not keep and len(ctx.children()) > 1:
- raise util.Abort(_('cannot edit history that would orphan nodes'))
- if len(ctx.parents()) != 1 and ctx.parents()[1] != node.nullid:
- raise util.Abort(_("can't edit history with merges"))
- if not ctx.children():
- current = new
- else:
- current = ctx.children()[0].node()
- revs.append(current)
- if len(repo[current].children()) and not keep:
- raise util.Abort(_('cannot edit history that would orphan nodes'))
- return revs
-
-
-def pick(ui, repo, ctx, ha, opts):
- oldctx = repo[ha]
- if oldctx.parents()[0] == ctx:
- ui.debug('node %s unchanged\n' % ha)
- return oldctx, [], [], []
- hg.update(repo, ctx.node())
- fd, patchfile = tempfile.mkstemp(prefix='hg-histedit-')
- fp = os.fdopen(fd, 'w')
- diffopts = patch.diffopts(ui, opts)
- diffopts.git = True
- diffopts.ignorews = False
- diffopts.ignorewsamount = False
- diffopts.ignoreblanklines = False
- gen = patch.diff(repo, oldctx.parents()[0].node(), ha, opts=diffopts)
- for chunk in gen:
- fp.write(chunk)
- fp.close()
- try:
- files = set()
- try:
- patch.patch(ui, repo, patchfile, files=files, eolmode=None)
- if not files:
- ui.warn(_('%s: empty changeset')
- % node.hex(ha))
- return ctx, [], [], []
- finally:
- os.unlink(patchfile)
- except Exception:
- raise util.Abort(_('Fix up the change and run '
- 'hg histedit --continue'))
- n = repo.commit(text=oldctx.description(), user=oldctx.user(),
- date=oldctx.date(), extra=oldctx.extra())
- return repo[n], [n], [oldctx.node()], []
-
-
-def edit(ui, repo, ctx, ha, opts):
- oldctx = repo[ha]
- hg.update(repo, ctx.node())
- fd, patchfile = tempfile.mkstemp(prefix='hg-histedit-')
- fp = os.fdopen(fd, 'w')
- diffopts = patch.diffopts(ui, opts)
- diffopts.git = True
- diffopts.ignorews = False
- diffopts.ignorewsamount = False
- diffopts.ignoreblanklines = False
- gen = patch.diff(repo, oldctx.parents()[0].node(), ha, opts=diffopts)
- for chunk in gen:
- fp.write(chunk)
- fp.close()
- try:
- files = set()
- try:
- patch.patch(ui, repo, patchfile, files=files, eolmode=None)
- finally:
- os.unlink(patchfile)
- except Exception:
- pass
- raise util.Abort(_('Make changes as needed; you may commit or record as '
- 'needed now.\nWhen you are finished, run hg'
- ' histedit --continue to resume.'))
-
-def fold(ui, repo, ctx, ha, opts):
- oldctx = repo[ha]
- hg.update(repo, ctx.node())
- fd, patchfile = tempfile.mkstemp(prefix='hg-histedit-')
- fp = os.fdopen(fd, 'w')
- diffopts = patch.diffopts(ui, opts)
- diffopts.git = True
- diffopts.ignorews = False
- diffopts.ignorewsamount = False
- diffopts.ignoreblanklines = False
- gen = patch.diff(repo, oldctx.parents()[0].node(), ha, opts=diffopts)
- for chunk in gen:
- fp.write(chunk)
- fp.close()
- try:
- files = set()
- try:
- patch.patch(ui, repo, patchfile, files=files, eolmode=None)
- if not files:
- ui.warn(_('%s: empty changeset')
- % node.hex(ha))
- return ctx, [], [], []
- finally:
- os.unlink(patchfile)
- except Exception:
- raise util.Abort(_('Fix up the change and run '
- 'hg histedit --continue'))
- n = repo.commit(text='fold-temp-revision %s' % ha, user=oldctx.user(),
- date=oldctx.date(), extra=oldctx.extra())
- return finishfold(ui, repo, ctx, oldctx, n, opts, [])
-
-def finishfold(ui, repo, ctx, oldctx, newnode, opts, internalchanges):
- parent = ctx.parents()[0].node()
- hg.update(repo, parent)
- fd, patchfile = tempfile.mkstemp(prefix='hg-histedit-')
- fp = os.fdopen(fd, 'w')
- diffopts = patch.diffopts(ui, opts)
- diffopts.git = True
- diffopts.ignorews = False
- diffopts.ignorewsamount = False
- diffopts.ignoreblanklines = False
- gen = patch.diff(repo, parent, newnode, opts=diffopts)
- for chunk in gen:
- fp.write(chunk)
- fp.close()
- files = set()
- try:
- patch.patch(ui, repo, patchfile, files=files, eolmode=None)
- finally:
- os.unlink(patchfile)
- newmessage = '\n***\n'.join(
- [ctx.description()] +
- [repo[r].description() for r in internalchanges] +
- [oldctx.description()]) + '\n'
- # If the changesets are from the same author, keep it.
- if ctx.user() == oldctx.user():
- username = ctx.user()
- else:
- username = ui.username()
- newmessage = ui.edit(newmessage, username)
- n = repo.commit(text=newmessage, user=username,
- date=max(ctx.date(), oldctx.date()), extra=oldctx.extra())
- return repo[n], [n], [oldctx.node(), ctx.node()], [newnode]
-
-def drop(ui, repo, ctx, ha, opts):
- return ctx, [], [repo[ha].node()], []
-
-
-def message(ui, repo, ctx, ha, opts):
- oldctx = repo[ha]
- hg.update(repo, ctx.node())
- fd, patchfile = tempfile.mkstemp(prefix='hg-histedit-')
- fp = os.fdopen(fd, 'w')
- diffopts = patch.diffopts(ui, opts)
- diffopts.git = True
- diffopts.ignorews = False
- diffopts.ignorewsamount = False
- diffopts.ignoreblanklines = False
- gen = patch.diff(repo, oldctx.parents()[0].node(), ha, opts=diffopts)
- for chunk in gen:
- fp.write(chunk)
- fp.close()
- try:
- files = set()
- try:
- patch.patch(ui, repo, patchfile, files=files, eolmode=None)
- finally:
- os.unlink(patchfile)
- except Exception:
- raise util.Abort(_('Fix up the change and run '
- 'hg histedit --continue'))
- message = oldctx.description() + '\n'
- message = ui.edit(message, ui.username())
- new = repo.commit(text=message, user=oldctx.user(), date=oldctx.date(),
- extra=oldctx.extra())
- newctx = repo[new]
- if oldctx.node() != newctx.node():
- return newctx, [new], [oldctx.node()], []
- # We didn't make an edit, so just indicate no replaced nodes
- return newctx, [new], [], []
-
-
-def makedesc(c):
- summary = ''
- if c.description():
- summary = c.description().splitlines()[0]
- line = 'pick %s %d %s' % (c.hex()[:12], c.rev(), summary)
- return line[:80] # trim to 80 chars so it's not stupidly wide in my editor
-
-actiontable = {'p': pick,
- 'pick': pick,
- 'e': edit,
- 'edit': edit,
- 'f': fold,
- 'fold': fold,
- 'd': drop,
- 'drop': drop,
- 'm': message,
- 'mess': message,
- }
-
-@command('histedit',
- [('', 'commands', '',
- _('Read history edits from the specified file.')),
- ('c', 'continue', False, _('continue an edit already in progress')),
- ('k', 'keep', False,
- _("don't strip old nodes after edit is complete")),
- ('', 'abort', False, _('abort an edit in progress')),
- ('o', 'outgoing', False, _('changesets not found in destination')),
- ('f', 'force', False,
- _('force outgoing even for unrelated repositories')),
- ('r', 'rev', [], _('first revision to be edited'))],
- _("[PARENT]"))
-def histedit(ui, repo, *parent, **opts):
- """interactively edit changeset history
- """
- # TODO only abort if we try and histedit mq patches, not just
- # blanket if mq patches are applied somewhere
- mq = getattr(repo, 'mq', None)
- if mq and mq.applied:
- raise util.Abort(_('source has mq patches applied'))
-
- parent = list(parent) + opts.get('rev', [])
- if opts.get('outgoing'):
- if len(parent) > 1:
- raise util.Abort(
- _('only one repo argument allowed with --outgoing'))
- elif parent:
- parent = parent[0]
-
- dest = ui.expandpath(parent or 'default-push', parent or 'default')
- dest, revs = hg.parseurl(dest, None)[:2]
- ui.status(_('comparing with %s\n') % util.hidepassword(dest))
-
- revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
- other = hg.peer(repo, opts, dest)
-
- if revs:
- revs = [repo.lookup(rev) for rev in revs]
-
- parent = discovery.findcommonoutgoing(
- repo, other, [], force=opts.get('force')).missing[0:1]
- else:
- if opts.get('force'):
- raise util.Abort(_('--force only allowed with --outgoing'))
-
- if opts.get('continue', False):
- if len(parent) != 0:
- raise util.Abort(_('no arguments allowed with --continue'))
- (parentctxnode, created, replaced,
- tmpnodes, existing, rules, keep, tip, replacemap) = readstate(repo)
- currentparent, wantnull = repo.dirstate.parents()
- parentctx = repo[parentctxnode]
- # existing is the list of revisions initially considered by
- # histedit. Here we use it to list new changesets, descendants
- # of parentctx without an 'existing' changeset in-between. We
- # also have to exclude 'existing' changesets which were
- # previously dropped.
- descendants = set(c.node() for c in
- repo.set('(%n::) - %n', parentctxnode, parentctxnode))
- existing = set(existing)
- notdropped = set(n for n in existing if n in descendants and
- (n not in replacemap or replacemap[n] in descendants))
- # Discover any nodes the user has added in the interim. We can
- # miss changesets which were dropped and recreated the same.
- newchildren = list(c.node() for c in repo.set(
- 'sort(%ln - (%ln or %ln::))', descendants, existing, notdropped))
- action, currentnode = rules.pop(0)
- if action in ('f', 'fold'):
- tmpnodes.extend(newchildren)
- else:
- created.extend(newchildren)
-
- m, a, r, d = repo.status()[:4]
- oldctx = repo[currentnode]
- message = oldctx.description() + '\n'
- if action in ('e', 'edit', 'm', 'mess'):
- message = ui.edit(message, ui.username())
- elif action in ('f', 'fold'):
- message = 'fold-temp-revision %s' % currentnode
- new = None
- if m or a or r or d:
- new = repo.commit(text=message, user=oldctx.user(),
- date=oldctx.date(), extra=oldctx.extra())
-
- # If we're resuming a fold and we have new changes, mark the
- # replacements and finish the fold. If not, it's more like a
- # drop of the changesets that disappeared, and we can skip
- # this step.
- if action in ('f', 'fold') and (new or newchildren):
- if new:
- tmpnodes.append(new)
- else:
- new = newchildren[-1]
- (parentctx, created_, replaced_, tmpnodes_) = finishfold(
- ui, repo, parentctx, oldctx, new, opts, newchildren)
- replaced.extend(replaced_)
- created.extend(created_)
- tmpnodes.extend(tmpnodes_)
- elif action not in ('d', 'drop'):
- if new != oldctx.node():
- replaced.append(oldctx.node())
- if new:
- if new != oldctx.node():
- created.append(new)
- parentctx = repo[new]
-
- elif opts.get('abort', False):
- if len(parent) != 0:
- raise util.Abort(_('no arguments allowed with --abort'))
- (parentctxnode, created, replaced, tmpnodes,
- existing, rules, keep, tip, replacemap) = readstate(repo)
- ui.debug('restore wc to old tip %s\n' % node.hex(tip))
- hg.clean(repo, tip)
- ui.debug('should strip created nodes %s\n' %
- ', '.join([node.hex(n)[:12] for n in created]))
- ui.debug('should strip temp nodes %s\n' %
- ', '.join([node.hex(n)[:12] for n in tmpnodes]))
- for nodes in (created, tmpnodes):
- lock = None
- try:
- lock = repo.lock()
- for n in reversed(nodes):
- try:
- repair.strip(ui, repo, n)
- except error.LookupError:
- pass
- finally:
- lockmod.release(lock)
- os.unlink(os.path.join(repo.path, 'histedit-state'))
- return
- else:
- cmdutil.bailifchanged(repo)
- if os.path.exists(os.path.join(repo.path, 'histedit-state')):
- raise util.Abort(_('history edit already in progress, try '
- '--continue or --abort'))
-
- tip, empty = repo.dirstate.parents()
-
-
- if len(parent) != 1:
- raise util.Abort(_('histedit requires exactly one parent revision'))
- parent = scmutil.revsingle(repo, parent[0]).node()
-
- keep = opts.get('keep', False)
- revs = between(repo, parent, tip, keep)
-
- ctxs = [repo[r] for r in revs]
- existing = [r.node() for r in ctxs]
- rules = opts.get('commands', '')
- if not rules:
- rules = '\n'.join([makedesc(c) for c in ctxs])
- rules += '\n\n'
- rules += editcomment % (node.hex(parent)[:12], node.hex(tip)[:12])
- rules = ui.edit(rules, ui.username())
- # Save edit rules in .hg/histedit-last-edit.txt in case
- # the user needs to ask for help after something
- # surprising happens.
- f = open(repo.join('histedit-last-edit.txt'), 'w')
- f.write(rules)
- f.close()
- else:
- f = open(rules)
- rules = f.read()
- f.close()
- rules = [l for l in (r.strip() for r in rules.splitlines())
- if l and not l[0] == '#']
- rules = verifyrules(rules, repo, ctxs)
-
- parentctx = repo[parent].parents()[0]
- keep = opts.get('keep', False)
- replaced = []
- replacemap = {}
- tmpnodes = []
- created = []
-
-
- while rules:
- writestate(repo, parentctx.node(), created, replaced,
- tmpnodes, existing, rules, keep, tip, replacemap)
- action, ha = rules.pop(0)
- (parentctx, created_, replaced_, tmpnodes_) = actiontable[action](
- ui, repo, parentctx, ha, opts)
-
- if replaced_:
- clen, rlen = len(created_), len(replaced_)
- if clen == rlen == 1:
- ui.debug('histedit: exact replacement of %s with %s\n' % (
- node.short(replaced_[0]), node.short(created_[0])))
-
- replacemap[replaced_[0]] = created_[0]
- elif clen > rlen:
- assert rlen == 1, ('unexpected replacement of '
- '%d changes with %d changes' % (rlen, clen))
- # made more changesets than we're replacing
- # TODO synthesize patch names for created patches
- replacemap[replaced_[0]] = created_[-1]
- ui.debug('histedit: created many, assuming %s replaced by %s' %
- (node.short(replaced_[0]), node.short(created_[-1])))
- elif rlen > clen:
- if not created_:
- # This must be a drop. Try and put our metadata on
- # the parent change.
- assert rlen == 1
- r = replaced_[0]
- ui.debug('histedit: %s seems replaced with nothing, '
- 'finding a parent\n' % (node.short(r)))
- pctx = repo[r].parents()[0]
- if pctx.node() in replacemap:
- ui.debug('histedit: parent is already replaced\n')
- replacemap[r] = replacemap[pctx.node()]
- else:
- replacemap[r] = pctx.node()
- ui.debug('histedit: %s best replaced by %s\n' % (
- node.short(r), node.short(replacemap[r])))
- else:
- assert len(created_) == 1
- for r in replaced_:
- ui.debug('histedit: %s replaced by %s\n' % (
- node.short(r), node.short(created_[0])))
- replacemap[r] = created_[0]
- else:
- assert False, (
- 'Unhandled case in replacement mapping! '
- 'replacing %d changes with %d changes' % (rlen, clen))
- created.extend(created_)
- replaced.extend(replaced_)
- tmpnodes.extend(tmpnodes_)
-
- hg.update(repo, parentctx.node())
-
- if not keep:
- if replacemap:
- ui.note(_('histedit: Should update metadata for the following '
- 'changes:\n'))
-
- def copybms(old, new):
- if old in tmpnodes or old in created:
- # can't have any metadata we'd want to update
- return
- while new in replacemap:
- new = replacemap[new]
- ui.note(_('histedit: %s to %s\n') % (node.short(old),
- node.short(new)))
- octx = repo[old]
- marks = octx.bookmarks()
- if marks:
- ui.note(_('histedit: moving bookmarks %s\n') %
- ', '.join(marks))
- for mark in marks:
- repo._bookmarks[mark] = new
- bookmarks.write(repo)
-
- # We assume that bookmarks on the tip should remain
- # tipmost, but bookmarks on non-tip changesets should go
- # to their most reasonable successor. As a result, find
- # the old tip and new tip and copy those bookmarks first,
- # then do the rest of the bookmark copies.
- oldtip = sorted(replacemap.keys(), key=repo.changelog.rev)[-1]
- newtip = sorted(replacemap.values(), key=repo.changelog.rev)[-1]
- copybms(oldtip, newtip)
-
- for old, new in sorted(replacemap.iteritems()):
- copybms(old, new)
- # TODO update mq state
-
- ui.debug('should strip replaced nodes %s\n' %
- ', '.join([node.hex(n)[:12] for n in replaced]))
- lock = None
- try:
- lock = repo.lock()
- for n in sorted(replaced, key=lambda x: repo[x].rev()):
- try:
- repair.strip(ui, repo, n)
- except error.LookupError:
- pass
- finally:
- lockmod.release(lock)
-
- ui.debug('should strip temp nodes %s\n' %
- ', '.join([node.hex(n)[:12] for n in tmpnodes]))
- lock = None
- try:
- lock = repo.lock()
- for n in reversed(tmpnodes):
- try:
- repair.strip(ui, repo, n)
- except error.LookupError:
- pass
- finally:
- lockmod.release(lock)
- os.unlink(os.path.join(repo.path, 'histedit-state'))
- if os.path.exists(repo.sjoin('undo')):
- os.unlink(repo.sjoin('undo'))
-
-
-def writestate(repo, parentctxnode, created, replaced,
- tmpnodes, existing, rules, keep, oldtip, replacemap):
- fp = open(os.path.join(repo.path, 'histedit-state'), 'w')
- pickle.dump((parentctxnode, created, replaced,
- tmpnodes, existing, rules, keep, oldtip, replacemap),
- fp)
- fp.close()
-
-def readstate(repo):
- """Returns a tuple of (parentnode, created, replaced, tmp, existing, rules,
- keep, oldtip, replacemap).
- """
- fp = open(os.path.join(repo.path, 'histedit-state'))
- return pickle.load(fp)
-
-
-def verifyrules(rules, repo, ctxs):
- """Verify that there exists exactly one edit rule per given changeset.
-
- Will abort if there are too many or too few rules, a malformed rule,
- or a rule on a changeset outside of the user-given range.
- """
- parsed = []
- if len(rules) != len(ctxs):
- raise util.Abort(_('must specify a rule for each changeset once'))
- for r in rules:
- if ' ' not in r:
- raise util.Abort(_('malformed line "%s"') % r)
- action, rest = r.split(' ', 1)
- if ' ' in rest.strip():
- ha, rest = rest.split(' ', 1)
- else:
- ha = r.strip()
- try:
- if repo[ha] not in ctxs:
- raise util.Abort(
- _('may not use changesets other than the ones listed'))
- except error.RepoError:
- raise util.Abort(_('unknown changeset %s listed') % ha)
- if action not in actiontable:
- raise util.Abort(_('unknown action "%s"') % action)
- parsed.append([action, ha])
- return parsed
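
The rules file format described in the deleted docstring (one
``action hash summary`` line per changeset, with ``#`` comments ignored)
is straightforward to parse. A minimal sketch in the spirit of
``verifyrules``, without the repository-side checks::

    def parserules(text):
        # one (action, hash) pair per non-comment line; the trailing
        # summary is ignored, as in histedit's own parser
        rules = []
        for line in text.splitlines():
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            action, rest = line.split(' ', 1)
            rules.append((action, rest.split(' ', 1)[0]))
        return rules

    parserules('pick c561b4e977df Add beta\nfold 7c2fd3b9020c Add delta\n')
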
diff --git a/hgext/inotify/__init__.py b/hgext/inotify/__init__.py
index 09c8bef..5e9f2cd 100644
--- a/hgext/inotify/__init__.py
+++ b/hgext/inotify/__init__.py
@@ -11,12 +11,9 @@
# todo: socket permissions
from mercurial.i18n import _
-from mercurial import util
import server
from client import client, QueryFailed
-testedwith = 'internal'
-
def serve(ui, repo, **opts):
'''start an inotify server for this repository'''
server.start(ui, repo.dirstate, repo.root, opts)
@@ -34,7 +31,7 @@ def debuginotify(ui, repo, **opts):
ui.write((' %s/\n') % path)
def reposetup(ui, repo):
- if not util.safehasattr(repo, 'dirstate'):
+ if not hasattr(repo, 'dirstate'):
return
class inotifydirstate(repo.dirstate.__class__):
@@ -48,8 +45,7 @@ def reposetup(ui, repo):
files = match.files()
if '.' in files:
files = []
- if (self._inotifyon and not ignored and not subrepos and
- not self._dirty):
+ if self._inotifyon and not ignored and not subrepos and not self._dirty:
cli = client(ui, repo)
try:
result = cli.statusquery(files, match, False,
diff --git a/hgext/inotify/linuxserver.py b/hgext/inotify/linuxserver.py
index a92b540..e2b9115 100644
--- a/hgext/inotify/linuxserver.py
+++ b/hgext/inotify/linuxserver.py
@@ -7,7 +7,7 @@
# GNU General Public License version 2 or any later version.
from mercurial.i18n import _
-from mercurial import osutil, util, error
+from mercurial import osutil, util
import server
import errno, os, select, stat, sys, time
@@ -431,10 +431,7 @@ class master(object):
def shutdown(self):
for obj in pollable.instances.itervalues():
- try:
- obj.shutdown()
- except error.SignalInterrupt:
- pass
+ obj.shutdown()
def run(self):
self.repowatcher.setup()
diff --git a/hgext/inotify/server.py b/hgext/inotify/server.py
index b654b17..b2dcaad 100644
--- a/hgext/inotify/server.py
+++ b/hgext/inotify/server.py
@@ -355,7 +355,7 @@ class socketlistener(object):
except (OSError, socket.error), inst:
try:
os.unlink(self.realsockpath)
- except OSError:
+ except:
pass
os.rmdir(tempdir)
if inst.errno == errno.EEXIST:
@@ -416,7 +416,7 @@ class socketlistener(object):
# try to send back our version to the client
# this way, the client too is informed of the mismatch
sock.sendall(chr(common.version))
- except socket.error:
+ except:
pass
return
diff --git a/hgext/interhg.py b/hgext/interhg.py
index a998a35..60c4255 100644
--- a/hgext/interhg.py
+++ b/hgext/interhg.py
@@ -28,8 +28,6 @@ from mercurial.hgweb import hgweb_mod
from mercurial import templatefilters, extensions
from mercurial.i18n import _
-testedwith = 'internal'
-
interhg_table = []
def uisetup(ui):
diff --git a/hgext/keyword.py b/hgext/keyword.py
index 54bab17..90654b9 100644
--- a/hgext/keyword.py
+++ b/hgext/keyword.py
@@ -1,6 +1,6 @@
# keyword.py - $Keyword$ expansion for Mercurial
#
-# Copyright 2007-2012 Christian Ebert <blacktrash@gmx.net>
+# Copyright 2007-2010 Christian Ebert <blacktrash@gmx.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
@@ -92,7 +92,6 @@ commands.optionalrepo += ' kwdemo'
cmdtable = {}
command = cmdutil.command(cmdtable)
-testedwith = 'internal'
# hg commands that do not act on keywords
nokwcommands = ('add addremove annotate bundle export grep incoming init log'
@@ -188,7 +187,7 @@ class kwtemplater(object):
self.repo = repo
self.match = match.match(repo.root, '', [], inc, exc)
self.restrict = kwtools['hgcmd'] in restricted.split()
- self.postcommit = False
+ self.record = False
kwmaps = self.ui.configitems('keywordmaps')
if kwmaps: # override default templates
@@ -238,26 +237,22 @@ class kwtemplater(object):
def iskwfile(self, cand, ctx):
'''Returns subset of candidates which are configured for keyword
- expansion but are not symbolic links.'''
- return [f for f in cand if self.match(f) and 'l' not in ctx.flags(f)]
+ expansion and are not symbolic links.'''
+ return [f for f in cand if self.match(f) and not 'l' in ctx.flags(f)]
def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
'''Overwrites selected files expanding/shrinking keywords.'''
- if self.restrict or lookup or self.postcommit: # exclude kw_copy
+ if self.restrict or lookup or self.record: # exclude kw_copy
candidates = self.iskwfile(candidates, ctx)
if not candidates:
return
kwcmd = self.restrict and lookup # kwexpand/kwshrink
if self.restrict or expand and lookup:
mf = ctx.manifest()
- if self.restrict or rekw:
- re_kw = self.rekw
- else:
- re_kw = self.rekwexp
- if expand:
- msg = _('overwriting %s expanding keywords\n')
- else:
- msg = _('overwriting %s shrinking keywords\n')
+ lctx = ctx
+ re_kw = (self.restrict or rekw) and self.rekw or self.rekwexp
+ msg = (expand and _('overwriting %s expanding keywords\n')
+ or _('overwriting %s shrinking keywords\n'))
for f in candidates:
if self.restrict:
data = self.repo.file(f).read(mf[f])
@@ -267,20 +262,21 @@ class kwtemplater(object):
continue
if expand:
if lookup:
- ctx = self.linkctx(f, mf[f])
- data, found = self.substitute(data, f, ctx, re_kw.subn)
+ lctx = self.linkctx(f, mf[f])
+ data, found = self.substitute(data, f, lctx, re_kw.subn)
elif self.restrict:
found = re_kw.search(data)
else:
data, found = _shrinktext(data, re_kw.subn)
if found:
self.ui.note(msg % f)
- fp = self.repo.wopener(f, "wb", atomictemp=True)
- fp.write(data)
- fp.close()
+ fpath = self.repo.wjoin(f)
+ mode = os.lstat(fpath).st_mode
+ self.repo.wwrite(f, data, ctx.flags(f))
+ os.chmod(fpath, mode)
if kwcmd:
self.repo.dirstate.normal(f)
- elif self.postcommit:
+ elif self.record:
self.repo.dirstate.normallookup(f)
def shrink(self, fname, text):
@@ -300,9 +296,7 @@ class kwtemplater(object):
def wread(self, fname, data):
'''If in restricted mode returns data read from wdir with
keyword substitutions removed.'''
- if self.restrict:
- return self.shrink(fname, data)
- return data
+ return self.restrict and self.shrink(fname, data) or data
class kwfilelog(filelog.filelog):
'''
@@ -331,11 +325,11 @@ class kwfilelog(filelog.filelog):
text = self.kwt.shrink(self.path, text)
return super(kwfilelog, self).cmp(node, text)
-def _status(ui, repo, wctx, kwt, *pats, **opts):
+def _status(ui, repo, kwt, *pats, **opts):
'''Bails out if [keyword] configuration is not active.
Returns status of working directory.'''
if kwt:
- return repo.status(match=scmutil.match(wctx, pats, opts), clean=True,
+ return repo.status(match=scmutil.match(repo[None], pats, opts), clean=True,
unknown=opts.get('unknown') or opts.get('all'))
if ui.configitems('keyword'):
raise util.Abort(_('[keyword] patterns cannot match'))
@@ -349,7 +343,7 @@ def _kwfwrite(ui, repo, expand, *pats, **opts):
kwt = kwtools['templater']
wlock = repo.wlock()
try:
- status = _status(ui, repo, wctx, kwt, *pats, **opts)
+ status = _status(ui, repo, kwt, *pats, **opts)
modified, added, removed, deleted, unknown, ignored, clean = status
if modified or added or removed or deleted:
raise util.Abort(_('outstanding uncommitted changes'))
@@ -421,10 +415,7 @@ def demo(ui, repo, *args, **opts):
ui.setconfig('keywordmaps', k, v)
else:
ui.status(_('\n\tconfiguration using current keyword template maps\n'))
- if uikwmaps:
- kwmaps = dict(uikwmaps)
- else:
- kwmaps = _defaultkwmaps(ui)
+ kwmaps = dict(uikwmaps) or _defaultkwmaps(ui)
uisetup(ui)
reposetup(ui, repo)
@@ -442,7 +433,7 @@ def demo(ui, repo, *args, **opts):
if name.split('.', 1)[0].find('commit') > -1:
repo.ui.setconfig('hooks', name, '')
msg = _('hg keyword configuration and expansion example')
- ui.note("hg ci -m '%s'\n" % msg) # check-code-ignore
+ ui.note("hg ci -m '%s'\n" % msg)
repo.commit(text=msg)
ui.status(_('\n\tkeywords expanded\n'))
ui.write(repo.wread(fn))
@@ -487,13 +478,13 @@ def files(ui, repo, *pats, **opts):
i = ignored (not tracked)
'''
kwt = kwtools['templater']
- wctx = repo[None]
- status = _status(ui, repo, wctx, kwt, *pats, **opts)
+ status = _status(ui, repo, kwt, *pats, **opts)
cwd = pats and repo.getcwd() or ''
modified, added, removed, deleted, unknown, ignored, clean = status
files = []
if not opts.get('unknown') or opts.get('all'):
files = sorted(modified + added + clean)
+ wctx = repo[None]
kwfiles = kwt.iskwfile(files, wctx)
kwdeleted = kwt.iskwfile(deleted, wctx)
kwunknown = kwt.iskwfile(unknown, wctx)
@@ -505,18 +496,11 @@ def files(ui, repo, *pats, **opts):
showfiles += ([f for f in files if f not in kwfiles],
[f for f in unknown if f not in kwunknown])
kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
- kwstates = zip(kwlabels, 'K!kIi', showfiles)
- fm = ui.formatter('kwfiles', opts)
- fmt = '%.0s%s\n'
- if opts.get('all') or ui.verbose:
- fmt = '%s %s\n'
- for kwstate, char, filenames in kwstates:
- label = 'kwfiles.' + kwstate
+ kwstates = zip('K!kIi', showfiles, kwlabels)
+ for char, filenames, kwstate in kwstates:
+ fmt = (opts.get('all') or ui.verbose) and '%s %%s\n' % char or '%s\n'
for f in filenames:
- fm.startitem()
- fm.write('kwstatus path', fmt, char,
- repo.pathto(f, cwd), label=label)
- fm.end()
+ ui.write(fmt % repo.pathto(f, cwd), label='kwfiles.' + kwstate)
@command('kwshrink', commands.walkopts, _('hg kwshrink [OPTION]... [FILE]...'))
def shrink(ui, repo, *pats, **opts):
@@ -590,7 +574,7 @@ def reposetup(ui, repo):
def kwcommitctx(self, ctx, error=False):
n = super(kwrepo, self).commitctx(ctx, error)
# no lock needed, only called from repo.commit() which already locks
- if not kwt.postcommit:
+ if not kwt.record:
restrict = kwt.restrict
kwt.restrict = True
kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
@@ -598,12 +582,12 @@ def reposetup(ui, repo):
kwt.restrict = restrict
return n
- def rollback(self, dryrun=False, force=False):
+ def rollback(self, dryrun=False):
wlock = self.wlock()
try:
if not dryrun:
changed = self['.'].files()
- ret = super(kwrepo, self).rollback(dryrun, force)
+ ret = super(kwrepo, self).rollback(dryrun)
if not dryrun:
ctx = self['.']
modified, added = _preselect(self[None].status(), changed)
@@ -632,21 +616,6 @@ def reposetup(ui, repo):
kwt.match = util.never
return orig(web, req, tmpl)
- def kw_amend(orig, ui, repo, commitfunc, old, extra, pats, opts):
- '''Wraps cmdutil.amend expanding keywords after amend.'''
- wlock = repo.wlock()
- try:
- kwt.postcommit = True
- newid = orig(ui, repo, commitfunc, old, extra, pats, opts)
- if newid != old.node():
- ctx = repo[newid]
- kwt.restrict = True
- kwt.overwrite(ctx, ctx.files(), False, True)
- kwt.restrict = False
- return newid
- finally:
- wlock.release()
-
def kw_copy(orig, ui, repo, pats, opts, rename=False):
'''Wraps cmdutil.copy so that copy/rename destinations do not
contain expanded keywords.
@@ -657,29 +626,25 @@ def reposetup(ui, repo):
For the latter we have to follow the symlink to find out whether its
target is configured for expansion and we therefore must unexpand the
keywords in the destination.'''
- wlock = repo.wlock()
- try:
- orig(ui, repo, pats, opts, rename)
- if opts.get('dry_run'):
- return
- wctx = repo[None]
- cwd = repo.getcwd()
-
- def haskwsource(dest):
- '''Returns true if dest is a regular file and configured for
- expansion or a symlink which points to a file configured for
- expansion. '''
- source = repo.dirstate.copied(dest)
- if 'l' in wctx.flags(source):
- source = scmutil.canonpath(repo.root, cwd,
- os.path.realpath(source))
- return kwt.match(source)
-
- candidates = [f for f in repo.dirstate.copies() if
- 'l' not in wctx.flags(f) and haskwsource(f)]
- kwt.overwrite(wctx, candidates, False, False)
- finally:
- wlock.release()
+ orig(ui, repo, pats, opts, rename)
+ if opts.get('dry_run'):
+ return
+ wctx = repo[None]
+ cwd = repo.getcwd()
+
+ def haskwsource(dest):
+ '''Returns true if dest is a regular file and configured for
+ expansion or a symlink which points to a file configured for
+ expansion. '''
+ source = repo.dirstate.copied(dest)
+ if 'l' in wctx.flags(source):
+ source = scmutil.canonpath(repo.root, cwd,
+ os.path.realpath(source))
+ return kwt.match(source)
+
+ candidates = [f for f in repo.dirstate.copies() if
+ not 'l' in wctx.flags(f) and haskwsource(f)]
+ kwt.overwrite(wctx, candidates, False, False)
def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
'''Wraps record.dorecord expanding keywords after recording.'''
@@ -687,7 +652,7 @@ def reposetup(ui, repo):
try:
# record returns 0 even when nothing has changed
# therefore compare nodes before and after
- kwt.postcommit = True
+ kwt.record = True
ctx = repo['.']
wstatus = repo[None].status()
ret = orig(ui, repo, commitfunc, *pats, **opts)
@@ -707,8 +672,7 @@ def reposetup(ui, repo):
# not make sense
if (fctx._filerev is None and
(self._repo._encodefilterpats or
- kwt.match(fctx.path()) and 'l' not in fctx.flags() or
- self.size() - 4 == fctx.size()) or
+ kwt.match(fctx.path()) and not 'l' in fctx.flags()) or
self.size() == fctx.size()):
return self._filelog.cmp(self._filenode, fctx.data())
return True
@@ -716,7 +680,6 @@ def reposetup(ui, repo):
extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
extensions.wrapfunction(patch, 'diff', kw_diff)
- extensions.wrapfunction(cmdutil, 'amend', kw_amend)
extensions.wrapfunction(cmdutil, 'copy', kw_copy)
for c in 'annotate changeset rev filediff diff'.split():
extensions.wrapfunction(webcommands, c, kwweb_skip)
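
For orientation, the ``re_kw.subn`` calls in the hunks above implement the
$Keyword$ substitution itself. A rough standalone sketch of the idea; the
pattern and map here are invented for the example, not the extension's
exact templates::

    import re

    kwmap = {'Id': 'hello.c 1.0 2012-01-01 alice'}  # hypothetical expansion
    re_kw = re.compile(r'\$(%s)(?::[^$\n]*)?\$'
                       % '|'.join(map(re.escape, kwmap)))

    def expand(text):
        # rewrites both unexpanded ($Id$) and stale expanded forms
        return re_kw.sub(
            lambda m: '$%s: %s $' % (m.group(1), kwmap[m.group(1)]), text)

    print(expand('/* $Id$ */'))
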
diff --git a/hgext/largefiles/CONTRIBUTORS b/hgext/largefiles/CONTRIBUTORS
deleted file mode 100644
index 9bef457..0000000
--- a/hgext/largefiles/CONTRIBUTORS
+++ /dev/null
@@ -1,4 +0,0 @@
-Greg Ward, author of the original bfiles extension
-Na'Tosha Bard of Unity Technologies
-Fog Creek Software
-Special thanks to the University of Toronto and the UCOSP program
diff --git a/hgext/largefiles/__init__.py b/hgext/largefiles/__init__.py
deleted file mode 100644
index 12c80fa..0000000
--- a/hgext/largefiles/__init__.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''track large binary files
-
-Large binary files tend to be not very compressible, not very
-diffable, and not at all mergeable. Such files are not handled
-efficiently by Mercurial's storage format (revlog), which is based on
-compressed binary deltas; storing large binary files as regular
-Mercurial files wastes bandwidth and disk space and increases
-Mercurial's memory usage. The largefiles extension addresses these
-problems by adding a centralized client-server layer on top of
-Mercurial: largefiles live in a *central store* out on the network
-somewhere, and you only fetch the revisions that you need when you
-need them.
-
-largefiles works by maintaining a "standin file" in .hglf/ for each
-largefile. The standins are small (41 bytes: an SHA-1 hash plus
-newline) and are tracked by Mercurial. Largefile revisions are
-identified by the SHA-1 hash of their contents, which is written to
-the standin. largefiles uses that revision ID to get/put largefile
-revisions from/to the central store. This saves both disk space and
-bandwidth, since you don't need to retrieve all historical revisions
-of large files when you clone or pull.
-
-To start a new repository or add new large binary files, just add
---large to your :hg:`add` command. For example::
-
- $ dd if=/dev/urandom of=randomdata count=2000
- $ hg add --large randomdata
- $ hg commit -m 'add randomdata as a largefile'
-
-When you push a changeset that adds/modifies largefiles to a remote
-repository, its largefile revisions will be uploaded along with it.
-Note that the remote Mercurial must also have the largefiles extension
-enabled for this to work.
-
-When you pull a changeset that affects largefiles from a remote
-repository, Mercurial behaves as normal. However, when you update to
-such a revision, any largefiles needed by that revision are downloaded
-and cached (if they have never been downloaded before). This means
-that network access may be required to update to changesets you have
-not previously updated to.
-
-If you already have large files tracked by Mercurial without the
-largefiles extension, you will need to convert your repository in
-order to benefit from largefiles. This is done with the
-:hg:`lfconvert` command::
-
- $ hg lfconvert --size 10 oldrepo newrepo
-
-In repositories that already have largefiles in them, any new file
-over 10MB will automatically be added as a largefile. To change this
-threshold, set ``largefiles.minsize`` in your Mercurial config file
-to the minimum size in megabytes to track as a largefile, or use the
---lfsize option to the add command (also in megabytes)::
-
- [largefiles]
- minsize = 2
-
- $ hg add --lfsize 2
-
-The ``largefiles.patterns`` config option allows you to specify a list
-of filename patterns (see :hg:`help patterns`) that should always be
-tracked as largefiles::
-
- [largefiles]
- patterns =
- *.jpg
- re:.*\.(png|bmp)$
- library.zip
- content/audio/*
-
-Files that match one of these patterns will be added as largefiles
-regardless of their size.
-
-The ``largefiles.minsize`` and ``largefiles.patterns`` config options
-will be ignored for any repositories not already containing a
-largefile. To add the first largefile to a repository, you must
-explicitly do so with the --large flag passed to the :hg:`add`
-command.
-'''
-
-from mercurial import commands
-
-import lfcommands
-import reposetup
-import uisetup
-
-testedwith = 'internal'
-
-reposetup = reposetup.reposetup
-uisetup = uisetup.uisetup
-
-commands.norepo += " lfconvert"
-
-cmdtable = lfcommands.cmdtable
diff --git a/hgext/largefiles/basestore.py b/hgext/largefiles/basestore.py
deleted file mode 100644
index 55aa4a0..0000000
--- a/hgext/largefiles/basestore.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''base class for store implementations and store-related utility code'''
-
-import binascii
-import re
-
-from mercurial import util, node, hg
-from mercurial.i18n import _
-
-import lfutil
-
-class StoreError(Exception):
- '''Raised when there is a problem getting files from or putting
- files to a central store.'''
- def __init__(self, filename, hash, url, detail):
- self.filename = filename
- self.hash = hash
- self.url = url
- self.detail = detail
-
- def longmessage(self):
- if self.url:
- return ('%s: %s\n'
- '(failed URL: %s)\n'
- % (self.filename, self.detail, self.url))
- else:
- return ('%s: %s\n'
- '(no default or default-push path set in hgrc)\n'
- % (self.filename, self.detail))
-
- def __str__(self):
- return "%s: %s" % (self.url, self.detail)
-
-class basestore(object):
- def __init__(self, ui, repo, url):
- self.ui = ui
- self.repo = repo
- self.url = url
-
- def put(self, source, hash):
- '''Put source file into the store under <filename>/<hash>.'''
- raise NotImplementedError('abstract method')
-
- def exists(self, hashes):
- '''Check to see if the store contains the given hashes.'''
- raise NotImplementedError('abstract method')
-
- def get(self, files):
- '''Get the specified largefiles from the store and write to local
- files under repo.root. files is a list of (filename, hash)
- tuples. Return (success, missing), lists of files successfully
- downloaded and those not found in the store. success is a list
- of (filename, hash) tuples; missing is a list of filenames that
- we could not get. (The detailed error message will already have
- been presented to the user, so missing is just supplied as a
- summary.)'''
- success = []
- missing = []
- ui = self.ui
-
- at = 0
- for filename, hash in files:
- ui.progress(_('getting largefiles'), at, unit='lfile',
- total=len(files))
- at += 1
- ui.note(_('getting %s:%s\n') % (filename, hash))
-
- storefilename = lfutil.storepath(self.repo, hash)
- tmpfile = util.atomictempfile(storefilename,
- createmode=self.repo.store.createmode)
-
- try:
- hhash = binascii.hexlify(self._getfile(tmpfile, filename, hash))
- except StoreError, err:
- ui.warn(err.longmessage())
- hhash = ""
-
- if hhash != hash:
- if hhash != "":
- ui.warn(_('%s: data corruption (expected %s, got %s)\n')
- % (filename, hash, hhash))
- tmpfile.discard() # no-op if it's already closed
- missing.append(filename)
- continue
-
- tmpfile.close()
- lfutil.linktousercache(self.repo, hash)
- success.append((filename, hhash))
-
- ui.progress(_('getting largefiles'), None)
- return (success, missing)
-
- def verify(self, revs, contents=False):
- '''Verify the existence (and, optionally, contents) of every big
- file revision referenced by every changeset in revs.
- Return 0 if all is well, non-zero on any errors.'''
- write = self.ui.write
- failed = False
-
- write(_('searching %d changesets for largefiles\n') % len(revs))
- verified = set() # set of (filename, filenode) tuples
-
- for rev in revs:
- cctx = self.repo[rev]
- cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))
-
- failed = util.any(self._verifyfile(
- cctx, cset, contents, standin, verified) for standin in cctx)
-
- numrevs = len(verified)
- numlfiles = len(set([fname for (fname, fnode) in verified]))
- if contents:
- write(_('verified contents of %d revisions of %d largefiles\n')
- % (numrevs, numlfiles))
- else:
- write(_('verified existence of %d revisions of %d largefiles\n')
- % (numrevs, numlfiles))
-
- return int(failed)
-
- def _getfile(self, tmpfile, filename, hash):
- '''Fetch one revision of one file from the store and write it
- to tmpfile. Compute the hash of the file on-the-fly as it
- downloads and return the binary hash. Close tmpfile. Raise
- StoreError if unable to download the file (e.g. it does not
- exist in the store).'''
- raise NotImplementedError('abstract method')
-
- def _verifyfile(self, cctx, cset, contents, standin, verified):
- '''Perform the actual verification of a file in the store.
- '''
- raise NotImplementedError('abstract method')
-
-import localstore, wirestore
-
-_storeprovider = {
- 'file': [localstore.localstore],
- 'http': [wirestore.wirestore],
- 'https': [wirestore.wirestore],
- 'ssh': [wirestore.wirestore],
- }
-
-_scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
-
-# During clone this function is passed the src's ui object
-# but it needs the dest's ui object so it can read out of
-# the config file. Use repo.ui instead.
-def _openstore(repo, remote=None, put=False):
- ui = repo.ui
-
- if not remote:
- lfpullsource = getattr(repo, 'lfpullsource', None)
- if lfpullsource:
- path = ui.expandpath(lfpullsource)
- else:
- path = ui.expandpath('default-push', 'default')
-
- # ui.expandpath() leaves 'default-push' and 'default' alone if
- # they cannot be expanded: fallback to the empty string,
- # meaning the current directory.
- if path == 'default-push' or path == 'default':
- path = ''
- remote = repo
- else:
- remote = hg.peer(repo, {}, path)
-
- # The path could be a scheme so use Mercurial's normal functionality
- # to resolve the scheme to a repository and use its path
- path = util.safehasattr(remote, 'url') and remote.url() or remote.path
-
- match = _scheme_re.match(path)
- if not match: # regular filesystem path
- scheme = 'file'
- else:
- scheme = match.group(1)
-
- try:
- storeproviders = _storeprovider[scheme]
- except KeyError:
- raise util.Abort(_('unsupported URL scheme %r') % scheme)
-
- for classobj in storeproviders:
- try:
- return classobj(ui, repo, remote)
- except lfutil.storeprotonotcapable:
- pass
-
- raise util.Abort(_('%s does not appear to be a largefile store') % path)
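
The scheme handling in ``_openstore`` reduces to: take the URL scheme if
one is present, otherwise treat the path as ``file``. A standalone sketch
of just that dispatch, reusing the regex from the deleted code::

    import re

    _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')

    def scheme(path):
        m = _scheme_re.match(path)
        return m.group(1) if m else 'file'

    assert scheme('https://example.com/store') == 'https'
    assert scheme('/srv/largefiles') == 'file'
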
diff --git a/hgext/largefiles/lfcommands.py b/hgext/largefiles/lfcommands.py
deleted file mode 100644
index de42edd..0000000
--- a/hgext/largefiles/lfcommands.py
+++ /dev/null
@@ -1,549 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''High-level command function for lfconvert, plus the cmdtable.'''
-
-import os
-import shutil
-
-from mercurial import util, match as match_, hg, node, context, error, \
- cmdutil, scmutil
-from mercurial.i18n import _
-from mercurial.lock import release
-
-import lfutil
-import basestore
-
-# -- Commands ----------------------------------------------------------
-
-def lfconvert(ui, src, dest, *pats, **opts):
- '''convert a normal repository to a largefiles repository
-
- Convert repository SOURCE to a new repository DEST, identical to
- SOURCE except that certain files will be converted as largefiles:
- specifically, any file that matches any PATTERN *or* whose size is
- above the minimum size threshold is converted as a largefile. The
- size used to determine whether or not to track a file as a
- largefile is the size of the first version of the file. The
- minimum size can be specified either with --size or in
- configuration as ``largefiles.size``.
-
- After running this command you will need to make sure that
- largefiles is enabled anywhere you intend to push the new
- repository.
-
- Use --to-normal to convert largefiles back to normal files; after
- this, the DEST repository can be used without largefiles at all.'''
-
- if opts['to_normal']:
- tolfile = False
- else:
- tolfile = True
- size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
-
- if not hg.islocal(src):
- raise util.Abort(_('%s is not a local Mercurial repo') % src)
- if not hg.islocal(dest):
- raise util.Abort(_('%s is not a local Mercurial repo') % dest)
-
- rsrc = hg.repository(ui, src)
- ui.status(_('initializing destination %s\n') % dest)
- rdst = hg.repository(ui, dest, create=True)
-
- success = False
- dstwlock = dstlock = None
- try:
- # Lock destination to prevent modification while it is being converted.
- # Don't need to lock src because we are just reading from its history
- # which can't change.
- dstwlock = rdst.wlock()
- dstlock = rdst.lock()
-
- # Get a list of all changesets in the source. The easy way to do this
- # is to simply walk the changelog, using changelog.nodesbetween().
- # Take a look at mercurial/revlog.py:639 for more details.
- # Use a generator instead of a list to decrease memory usage
- ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
- rsrc.heads())[0])
- revmap = {node.nullid: node.nullid}
- if tolfile:
- lfiles = set()
- normalfiles = set()
- if not pats:
- pats = ui.configlist(lfutil.longname, 'patterns', default=[])
- if pats:
- matcher = match_.match(rsrc.root, '', list(pats))
- else:
- matcher = None
-
- lfiletohash = {}
- for ctx in ctxs:
- ui.progress(_('converting revisions'), ctx.rev(),
- unit=_('revision'), total=rsrc['tip'].rev())
- _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
- lfiles, normalfiles, matcher, size, lfiletohash)
- ui.progress(_('converting revisions'), None)
-
- if os.path.exists(rdst.wjoin(lfutil.shortname)):
- shutil.rmtree(rdst.wjoin(lfutil.shortname))
-
- for f in lfiletohash.keys():
- if os.path.isfile(rdst.wjoin(f)):
- os.unlink(rdst.wjoin(f))
- try:
- os.removedirs(os.path.dirname(rdst.wjoin(f)))
- except OSError:
- pass
-
- # If there were any files converted to largefiles, add largefiles
- # to the destination repository's requirements.
- if lfiles:
- rdst.requirements.add('largefiles')
- rdst._writerequirements()
- else:
- for ctx in ctxs:
- ui.progress(_('converting revisions'), ctx.rev(),
- unit=_('revision'), total=rsrc['tip'].rev())
- _addchangeset(ui, rsrc, rdst, ctx, revmap)
-
- ui.progress(_('converting revisions'), None)
- success = True
- finally:
- rdst.dirstate.clear()
- release(dstlock, dstwlock)
- if not success:
- # we failed, remove the new directory
- shutil.rmtree(rdst.root)
-
-def _addchangeset(ui, rsrc, rdst, ctx, revmap):
- # Convert src parents to dst parents
- parents = _convertparents(ctx, revmap)
-
- # Generate list of changed files
- files = _getchangedfiles(ctx, parents)
-
- def getfilectx(repo, memctx, f):
- if lfutil.standin(f) in files:
- # if the file isn't in the manifest then it was removed
- # or renamed, raise IOError to indicate this
- try:
- fctx = ctx.filectx(lfutil.standin(f))
- except error.LookupError:
- raise IOError
- renamed = fctx.renamed()
- if renamed:
- renamed = lfutil.splitstandin(renamed[0])
-
- hash = fctx.data().strip()
- path = lfutil.findfile(rsrc, hash)
- ### TODO: What if the file is not cached?
- data = ''
- fd = None
- try:
- fd = open(path, 'rb')
- data = fd.read()
- finally:
- if fd:
- fd.close()
- return context.memfilectx(f, data, 'l' in fctx.flags(),
- 'x' in fctx.flags(), renamed)
- else:
- return _getnormalcontext(repo.ui, ctx, f, revmap)
-
- dstfiles = []
- for file in files:
- if lfutil.isstandin(file):
- dstfiles.append(lfutil.splitstandin(file))
- else:
- dstfiles.append(file)
- # Commit
- _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
-
-def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
- matcher, size, lfiletohash):
- # Convert src parents to dst parents
- parents = _convertparents(ctx, revmap)
-
- # Generate list of changed files
- files = _getchangedfiles(ctx, parents)
-
- dstfiles = []
- for f in files:
- if f not in lfiles and f not in normalfiles:
- islfile = _islfile(f, ctx, matcher, size)
- # If this file was renamed or copied then copy
- # the lfileness of its predecessor
- if f in ctx.manifest():
- fctx = ctx.filectx(f)
- renamed = fctx.renamed()
- renamedlfile = renamed and renamed[0] in lfiles
- islfile |= renamedlfile
- if 'l' in fctx.flags():
- if renamedlfile:
- raise util.Abort(
- _('renamed/copied largefile %s becomes symlink')
- % f)
- islfile = False
- if islfile:
- lfiles.add(f)
- else:
- normalfiles.add(f)
-
- if f in lfiles:
- dstfiles.append(lfutil.standin(f))
- # largefile in manifest if it has not been removed/renamed
- if f in ctx.manifest():
- fctx = ctx.filectx(f)
- if 'l' in fctx.flags():
- renamed = fctx.renamed()
- if renamed and renamed[0] in lfiles:
- raise util.Abort(_('largefile %s becomes symlink') % f)
-
- # largefile was modified, update standins
- fullpath = rdst.wjoin(f)
- util.makedirs(os.path.dirname(fullpath))
- m = util.sha1('')
- m.update(ctx[f].data())
- hash = m.hexdigest()
- if f not in lfiletohash or lfiletohash[f] != hash:
- try:
- fd = open(fullpath, 'wb')
- fd.write(ctx[f].data())
- finally:
- if fd:
- fd.close()
- executable = 'x' in ctx[f].flags()
- os.chmod(fullpath, lfutil.getmode(executable))
- lfutil.writestandin(rdst, lfutil.standin(f), hash,
- executable)
- lfiletohash[f] = hash
- else:
- # normal file
- dstfiles.append(f)
-
- def getfilectx(repo, memctx, f):
- if lfutil.isstandin(f):
- # if the file isn't in the manifest then it was removed
- # or renamed, raise IOError to indicate this
- srcfname = lfutil.splitstandin(f)
- try:
- fctx = ctx.filectx(srcfname)
- except error.LookupError:
- raise IOError
- renamed = fctx.renamed()
- if renamed:
- # standin is always a largefile because largefile-ness
- # doesn't change after rename or copy
- renamed = lfutil.standin(renamed[0])
-
- return context.memfilectx(f, lfiletohash[srcfname] + '\n', 'l' in
- fctx.flags(), 'x' in fctx.flags(), renamed)
- else:
- return _getnormalcontext(repo.ui, ctx, f, revmap)
-
- # Commit
- _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
-
-def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
- mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
- getfilectx, ctx.user(), ctx.date(), ctx.extra())
- ret = rdst.commitctx(mctx)
- rdst.setparents(ret)
- revmap[ctx.node()] = rdst.changelog.tip()
-
-# Generate list of changed files
-def _getchangedfiles(ctx, parents):
- files = set(ctx.files())
- if node.nullid not in parents:
- mc = ctx.manifest()
- mp1 = ctx.parents()[0].manifest()
- mp2 = ctx.parents()[1].manifest()
- files |= (set(mp1) | set(mp2)) - set(mc)
- for f in mc:
- if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
- files.add(f)
- return files
-
-# Convert src parents to dst parents
-def _convertparents(ctx, revmap):
- parents = []
- for p in ctx.parents():
- parents.append(revmap[p.node()])
- while len(parents) < 2:
- parents.append(node.nullid)
- return parents
-
-# Get memfilectx for a normal file
-def _getnormalcontext(ui, ctx, f, revmap):
- try:
- fctx = ctx.filectx(f)
- except error.LookupError:
- raise IOError
- renamed = fctx.renamed()
- if renamed:
- renamed = renamed[0]
-
- data = fctx.data()
- if f == '.hgtags':
- data = _converttags(ui, revmap, data)
- return context.memfilectx(f, data, 'l' in fctx.flags(),
- 'x' in fctx.flags(), renamed)
-
-# Remap tag data using a revision map
-def _converttags(ui, revmap, data):
- newdata = []
- for line in data.splitlines():
- try:
- id, name = line.split(' ', 1)
- except ValueError:
- ui.warn(_('skipping incorrectly formatted tag %s\n'
- % line))
- continue
- try:
- newid = node.bin(id)
- except TypeError:
- ui.warn(_('skipping incorrectly formatted id %s\n'
- % id))
- continue
- try:
- newdata.append('%s %s\n' % (node.hex(revmap[newid]),
- name))
- except KeyError:
- ui.warn(_('no mapping for id %s\n') % id)
- continue
- return ''.join(newdata)
-
-def _islfile(file, ctx, matcher, size):
- '''Return true if file should be considered a largefile, i.e.
- matcher matches it or it is larger than size.'''
- # never store special .hg* files as largefiles
- if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
- return False
- if matcher and matcher(file):
- return True
- try:
- return ctx.filectx(file).size() >= size * 1024 * 1024
- except error.LookupError:
- return False
-
-def uploadlfiles(ui, rsrc, rdst, files):
- '''upload largefiles to the central store'''
-
- if not files:
- return
-
- store = basestore._openstore(rsrc, rdst, put=True)
-
- at = 0
- ui.debug("sending statlfile command for %d largefiles\n" % len(files))
- retval = store.exists(files)
- files = filter(lambda h: not retval[h], files)
- ui.debug("%d largefiles need to be uploaded\n" % len(files))
-
- for hash in files:
- ui.progress(_('uploading largefiles'), at, unit='largefile',
- total=len(files))
- source = lfutil.findfile(rsrc, hash)
- if not source:
- raise util.Abort(_('largefile %s missing from store'
- ' (needs to be uploaded)') % hash)
- # XXX check for errors here
- store.put(source, hash)
- at += 1
- ui.progress(_('uploading largefiles'), None)
-
-def verifylfiles(ui, repo, all=False, contents=False):
- '''Verify that every big file revision in the current changeset
- exists in the central store. With --contents, also verify that
- the contents of each big file revision are correct (SHA-1 hash
- matches the revision ID). With --all, check every changeset in
- this repository.'''
- if all:
- # Pass a list to the function rather than an iterator because we know a
- # list will work.
- revs = range(len(repo))
- else:
- revs = ['.']
-
- store = basestore._openstore(repo)
- return store.verify(revs, contents=contents)
-
-def cachelfiles(ui, repo, node, filelist=None):
- '''cachelfiles ensures that all largefiles needed by the specified revision
- are present in the repository's largefile cache.
-
- returns a tuple (cached, missing). cached is the list of files downloaded
- by this operation; missing is the list of files that were needed but could
- not be found.'''
- lfiles = lfutil.listlfiles(repo, node)
- if filelist:
- lfiles = set(lfiles) & set(filelist)
- toget = []
-
- for lfile in lfiles:
- # If we are mid-merge, then we have to trust the standin that is in the
- # working copy to have the correct hash value. This is because the
- # original hg.merge() already updated the standin as part of the normal
- # merge process -- we just have to update the largefile to match.
- if (getattr(repo, "_ismerging", False) and
- os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
- expectedhash = lfutil.readstandin(repo, lfile)
- else:
- expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
-
- # if it exists and its hash matches, it might have been locally
- # modified before updating and the user chose 'local'. In this case,
- # it will not be in any store, so don't look for it.
- if ((not os.path.exists(repo.wjoin(lfile)) or
- expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and
- not lfutil.findfile(repo, expectedhash)):
- toget.append((lfile, expectedhash))
-
- if toget:
- store = basestore._openstore(repo)
- ret = store.get(toget)
- return ret
-
- return ([], [])
-
-def downloadlfiles(ui, repo, rev=None):
- matchfn = scmutil.match(repo[None],
- [repo.wjoin(lfutil.shortname)], {})
- def prepare(ctx, fns):
- pass
- totalsuccess = 0
- totalmissing = 0
- for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
- prepare):
- success, missing = cachelfiles(ui, repo, ctx.node())
- totalsuccess += len(success)
- totalmissing += len(missing)
- ui.status(_("%d additional largefiles cached\n") % totalsuccess)
- if totalmissing > 0:
- ui.status(_("%d largefiles failed to download\n") % totalmissing)
- return totalsuccess, totalmissing
-
-def updatelfiles(ui, repo, filelist=None, printmessage=True):
- wlock = repo.wlock()
- try:
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
-
- if filelist is not None:
- lfiles = [f for f in lfiles if f in filelist]
-
- printed = False
- if printmessage and lfiles:
- ui.status(_('getting changed largefiles\n'))
- printed = True
- cachelfiles(ui, repo, '.', lfiles)
-
- updated, removed = 0, 0
- for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles):
- # increment the appropriate counter according to _updatelfile's
- # return value
- updated += i > 0 and i or 0
- removed -= i < 0 and i or 0
- if printmessage and (removed or updated) and not printed:
- ui.status(_('getting changed largefiles\n'))
- printed = True
-
- lfdirstate.write()
- if printed and printmessage:
- ui.status(_('%d largefiles updated, %d removed\n') % (updated,
- removed))
- finally:
- wlock.release()
-
-def _updatelfile(repo, lfdirstate, lfile):
- '''updates a single largefile and copies the state of its standin from
- the repository's dirstate to its state in the lfdirstate.
-
- returns 1 if the file was modified, -1 if the file was removed, 0 if the
- file was unchanged, and None if the needed largefile was missing from the
- cache.'''
- ret = 0
- abslfile = repo.wjoin(lfile)
- absstandin = repo.wjoin(lfutil.standin(lfile))
- if os.path.exists(absstandin):
- if os.path.exists(absstandin+'.orig'):
- shutil.copyfile(abslfile, abslfile+'.orig')
- expecthash = lfutil.readstandin(repo, lfile)
- if (expecthash != '' and
- (not os.path.exists(abslfile) or
- expecthash != lfutil.hashfile(abslfile))):
- if not lfutil.copyfromcache(repo, expecthash, lfile):
- # use normallookup() to allocate an entry in the largefiles
- # dirstate; without one, lfilesrepo.status() would mistake such
- # cache-missing files for REMOVED ones.
- lfdirstate.normallookup(lfile)
- return None # don't try to set the mode
- else:
- # Synchronize largefile dirstate to the last modified time of
- # the file
- lfdirstate.normal(lfile)
- ret = 1
- mode = os.stat(absstandin).st_mode
- if mode != os.stat(abslfile).st_mode:
- os.chmod(abslfile, mode)
- ret = 1
- else:
- # Remove lfiles for which the standin is deleted, unless the
- # lfile is added to the repository again. This happens when a
- # largefile is converted back to a normal file: the standin
- # disappears, but a new (normal) file appears as the lfile.
- if os.path.exists(abslfile) and lfile not in repo[None]:
- util.unlinkpath(abslfile)
- ret = -1
- state = repo.dirstate[lfutil.standin(lfile)]
- if state == 'n':
- # When rebasing, we need to synchronize the standin and the largefile,
- # because otherwise the largefile will get reverted. But for commit's
- # sake, we have to mark the file as unclean.
- if getattr(repo, "_isrebasing", False):
- lfdirstate.normallookup(lfile)
- else:
- lfdirstate.normal(lfile)
- elif state == 'r':
- lfdirstate.remove(lfile)
- elif state == 'a':
- lfdirstate.add(lfile)
- elif state == '?':
- lfdirstate.drop(lfile)
- return ret
-
-def catlfile(repo, lfile, rev, filename):
- hash = lfutil.readstandin(repo, lfile, rev)
- if not lfutil.inusercache(repo.ui, hash):
- store = basestore._openstore(repo)
- success, missing = store.get([(lfile, hash)])
- if len(success) != 1:
- raise util.Abort(
- _('largefile %s is not in cache and could not be downloaded')
- % lfile)
- path = lfutil.usercachepath(repo.ui, hash)
- fpout = cmdutil.makefileobj(repo, filename)
- fpin = open(path, "rb")
- fpout.write(fpin.read())
- fpout.close()
- fpin.close()
- return 0
-
-# -- hg commands declarations ------------------------------------------------
-
-cmdtable = {
- 'lfconvert': (lfconvert,
- [('s', 'size', '',
- _('minimum size (MB) for files to be converted '
- 'as largefiles'),
- 'SIZE'),
- ('', 'to-normal', False,
- _('convert from a largefiles repo to a normal repo')),
- ],
- _('hg lfconvert SOURCE DEST [FILE ...]')),
- }
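
The conversion loop above hashes each largefile's contents and writes a standin in its place. A minimal sketch of that invariant, with hashlib standing in for Mercurial's util.sha1:

import hashlib

def standin_content(data):
    # a standin is just the SHA-1 of the largefile's contents,
    # hex-encoded, plus a trailing newline
    return hashlib.sha1(data).hexdigest() + '\n'

assert len(standin_content(b'some large payload')) == 41  # 40 hex chars + '\n'
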
diff --git a/hgext/largefiles/lfutil.py b/hgext/largefiles/lfutil.py
deleted file mode 100644
index 6a64d89..0000000
--- a/hgext/largefiles/lfutil.py
+++ /dev/null
@@ -1,467 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''largefiles utility code: must not import other modules in this package.'''
-
-import os
-import errno
-import platform
-import shutil
-import stat
-
-from mercurial import dirstate, httpconnection, match as match_, util, scmutil
-from mercurial.i18n import _
-
-shortname = '.hglf'
-longname = 'largefiles'
-
-
-# -- Portability wrappers ----------------------------------------------
-
-def dirstatewalk(dirstate, matcher, unknown=False, ignored=False):
- return dirstate.walk(matcher, [], unknown, ignored)
-
-def repoadd(repo, list):
- add = repo[None].add
- return add(list)
-
-def reporemove(repo, list, unlink=False):
- def remove(list, unlink):
- wlock = repo.wlock()
- try:
- if unlink:
- for f in list:
- try:
- util.unlinkpath(repo.wjoin(f))
- except OSError, inst:
- if inst.errno != errno.ENOENT:
- raise
- repo[None].forget(list)
- finally:
- wlock.release()
- return remove(list, unlink=unlink)
-
-def repoforget(repo, list):
- forget = repo[None].forget
- return forget(list)
-
-def findoutgoing(repo, remote, force):
- from mercurial import discovery
- common, _anyinc, _heads = discovery.findcommonincoming(repo,
- remote.peer(), force=force)
- return repo.changelog.findmissing(common)
-
-# -- Private worker functions ------------------------------------------
-
-def getminsize(ui, assumelfiles, opt, default=10):
- lfsize = opt
- if not lfsize and assumelfiles:
- lfsize = ui.config(longname, 'minsize', default=default)
- if lfsize:
- try:
- lfsize = float(lfsize)
- except ValueError:
- raise util.Abort(_('largefiles: size must be a number (not %s)\n')
- % lfsize)
- if lfsize is None:
- raise util.Abort(_('minimum size for largefiles must be specified'))
- return lfsize
-
-def link(src, dest):
- try:
- util.oslink(src, dest)
- except OSError:
- # if hardlinks fail, fallback on atomic copy
- dst = util.atomictempfile(dest)
- for chunk in util.filechunkiter(open(src, 'rb')):
- dst.write(chunk)
- dst.close()
- os.chmod(dest, os.stat(src).st_mode)
-
-def usercachepath(ui, hash):
- path = ui.configpath(longname, 'usercache', None)
- if path:
- path = os.path.join(path, hash)
- else:
- if os.name == 'nt':
- appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
- if appdata:
- path = os.path.join(appdata, longname, hash)
- elif platform.system() == 'Darwin':
- home = os.getenv('HOME')
- if home:
- path = os.path.join(home, 'Library', 'Caches',
- longname, hash)
- elif os.name == 'posix':
- path = os.getenv('XDG_CACHE_HOME')
- if path:
- path = os.path.join(path, longname, hash)
- else:
- home = os.getenv('HOME')
- if home:
- path = os.path.join(home, '.cache', longname, hash)
- else:
- raise util.Abort(_('unknown operating system: %s\n') % os.name)
- return path
-
-def inusercache(ui, hash):
- path = usercachepath(ui, hash)
- return path and os.path.exists(path)
-
-def findfile(repo, hash):
- if instore(repo, hash):
- repo.ui.note(_('found %s in store\n') % hash)
- return storepath(repo, hash)
- elif inusercache(repo.ui, hash):
- repo.ui.note(_('found %s in system cache\n') % hash)
- path = storepath(repo, hash)
- util.makedirs(os.path.dirname(path))
- link(usercachepath(repo.ui, hash), path)
- return path
- return None
-
-class largefilesdirstate(dirstate.dirstate):
- def __getitem__(self, key):
- return super(largefilesdirstate, self).__getitem__(unixpath(key))
- def normal(self, f):
- return super(largefilesdirstate, self).normal(unixpath(f))
- def remove(self, f):
- return super(largefilesdirstate, self).remove(unixpath(f))
- def add(self, f):
- return super(largefilesdirstate, self).add(unixpath(f))
- def drop(self, f):
- return super(largefilesdirstate, self).drop(unixpath(f))
- def forget(self, f):
- return super(largefilesdirstate, self).forget(unixpath(f))
- def normallookup(self, f):
- return super(largefilesdirstate, self).normallookup(unixpath(f))
-
-def openlfdirstate(ui, repo):
- '''
- Return a dirstate object that tracks largefiles: i.e. its root is
- the repo root, but it is saved in .hg/largefiles/dirstate.
- '''
- admin = repo.join(longname)
- opener = scmutil.opener(admin)
- lfdirstate = largefilesdirstate(opener, ui, repo.root,
- repo.dirstate._validate)
-
- # If the largefiles dirstate does not exist, populate and create
- # it. This ensures that we create it on the first meaningful
- # largefiles operation in a new clone.
- if not os.path.exists(os.path.join(admin, 'dirstate')):
- util.makedirs(admin)
- matcher = getstandinmatcher(repo)
- for standin in dirstatewalk(repo.dirstate, matcher):
- lfile = splitstandin(standin)
- hash = readstandin(repo, lfile)
- lfdirstate.normallookup(lfile)
- try:
- if hash == hashfile(repo.wjoin(lfile)):
- lfdirstate.normal(lfile)
- except OSError, err:
- if err.errno != errno.ENOENT:
- raise
- return lfdirstate
-
-def lfdirstatestatus(lfdirstate, repo, rev):
- match = match_.always(repo.root, repo.getcwd())
- s = lfdirstate.status(match, [], False, False, False)
- unsure, modified, added, removed, missing, unknown, ignored, clean = s
- for lfile in unsure:
- if repo[rev][standin(lfile)].data().strip() != \
- hashfile(repo.wjoin(lfile)):
- modified.append(lfile)
- else:
- clean.append(lfile)
- lfdirstate.normal(lfile)
- return (modified, added, removed, missing, unknown, ignored, clean)
-
-def listlfiles(repo, rev=None, matcher=None):
- '''return a list of largefiles in the working copy or the
- specified changeset'''
-
- if matcher is None:
- matcher = getstandinmatcher(repo)
-
- # ignore unknown files in working directory
- return [splitstandin(f)
- for f in repo[rev].walk(matcher)
- if rev is not None or repo.dirstate[f] != '?']
-
-def instore(repo, hash):
- return os.path.exists(storepath(repo, hash))
-
-def storepath(repo, hash):
- return repo.join(os.path.join(longname, hash))
-
-def copyfromcache(repo, hash, filename):
- '''Copy the specified largefile from the repo or system cache to
- filename in the repository. Return true on success or false if the
- file was not found in either cache (which should not happen:
- this is meant to be called only after ensuring that the needed
- largefile exists in the cache).'''
- path = findfile(repo, hash)
- if path is None:
- return False
- util.makedirs(os.path.dirname(repo.wjoin(filename)))
- # The write may fail before the file is fully written, but we
- # don't use atomic writes in the working copy.
- shutil.copy(path, repo.wjoin(filename))
- return True
-
-def copytostore(repo, rev, file, uploaded=False):
- hash = readstandin(repo, file)
- if instore(repo, hash):
- return
- copytostoreabsolute(repo, repo.wjoin(file), hash)
-
-def copyalltostore(repo, node):
- '''Copy all largefiles in a given revision to the store'''
-
- ctx = repo[node]
- for filename in ctx.files():
- if isstandin(filename) and filename in ctx.manifest():
- realfile = splitstandin(filename)
- copytostore(repo, ctx.node(), realfile)
-
-
-def copytostoreabsolute(repo, file, hash):
- util.makedirs(os.path.dirname(storepath(repo, hash)))
- if inusercache(repo.ui, hash):
- link(usercachepath(repo.ui, hash), storepath(repo, hash))
- else:
- dst = util.atomictempfile(storepath(repo, hash),
- createmode=repo.store.createmode)
- for chunk in util.filechunkiter(open(file, 'rb')):
- dst.write(chunk)
- dst.close()
- linktousercache(repo, hash)
-
-def linktousercache(repo, hash):
- path = usercachepath(repo.ui, hash)
- if path:
- util.makedirs(os.path.dirname(path))
- link(storepath(repo, hash), path)
-
-def getstandinmatcher(repo, pats=[], opts={}):
- '''Return a match object that applies pats to the standin directory'''
- standindir = repo.pathto(shortname)
- if pats:
- # patterns supplied: search standin directory relative to current dir
- cwd = repo.getcwd()
- if os.path.isabs(cwd):
- # cwd is an absolute path for hg -R <reponame>
- # work relative to the repository root in this case
- cwd = ''
- pats = [os.path.join(standindir, cwd, pat) for pat in pats]
- elif os.path.isdir(standindir):
- # no patterns: relative to repo root
- pats = [standindir]
- else:
- # no patterns and no standin dir: return matcher that matches nothing
- match = match_.match(repo.root, None, [], exact=True)
- match.matchfn = lambda f: False
- return match
- return getmatcher(repo, pats, opts, showbad=False)
-
-def getmatcher(repo, pats=[], opts={}, showbad=True):
- '''Wrapper around scmutil.match() that adds showbad: if false,
- neuter the match object's bad() method so it does not print any
- warnings about missing files or directories.'''
- match = scmutil.match(repo[None], pats, opts)
-
- if not showbad:
- match.bad = lambda f, msg: None
- return match
-
-def composestandinmatcher(repo, rmatcher):
- '''Return a matcher that accepts standins corresponding to the
- files accepted by rmatcher. Pass the list of files in the matcher
- as the paths specified by the user.'''
- smatcher = getstandinmatcher(repo, rmatcher.files())
- isstandin = smatcher.matchfn
- def composedmatchfn(f):
- return isstandin(f) and rmatcher.matchfn(splitstandin(f))
- smatcher.matchfn = composedmatchfn
-
- return smatcher
-
-def standin(filename):
- '''Return the repo-relative path to the standin for the specified big
- file.'''
- # Notes:
- # 1) Most callers want an absolute path, but _createstandin() needs
- # it repo-relative so lfadd() can pass it to repoadd(). So leave
- # it up to the caller to use repo.wjoin() to get an absolute path.
- # 2) Join with '/' because that's what dirstate always uses, even on
- # Windows. Change existing separator to '/' first in case we are
- # passed filenames from an external source (like the command line).
- return shortname + '/' + util.pconvert(filename)
-
-def isstandin(filename):
- '''Return true if filename is a big file standin. filename must be
- in Mercurial's internal form (slash-separated).'''
- return filename.startswith(shortname + '/')
-
-def splitstandin(filename):
- # Split on / because that's what dirstate always uses, even on Windows.
- # Change local separator to / first just in case we are passed filenames
- # from an external source (like the command line).
- bits = util.pconvert(filename).split('/', 1)
- if len(bits) == 2 and bits[0] == shortname:
- return bits[1]
- else:
- return None
-
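
standin() and splitstandin() above are inverses for slash-separated paths. A self-contained sketch, with util.pconvert approximated by a backslash replace:

shortname = '.hglf'

def standin(filename):
    return shortname + '/' + filename.replace('\\', '/')

def splitstandin(filename):
    bits = filename.replace('\\', '/').split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    return None

assert splitstandin(standin('dir/big.bin')) == 'dir/big.bin'
assert splitstandin('dir/big.bin') is None  # not under .hglf/
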
-def updatestandin(repo, standin):
- file = repo.wjoin(splitstandin(standin))
- if os.path.exists(file):
- hash = hashfile(file)
- executable = getexecutable(file)
- writestandin(repo, standin, hash, executable)
-
-def readstandin(repo, filename, node=None):
- '''read hex hash from standin for filename at given node, or working
- directory if no node is given'''
- return repo[node][standin(filename)].data().strip()
-
-def writestandin(repo, standin, hash, executable):
- '''write hash to <repo.root>/<standin>'''
- writehash(hash, repo.wjoin(standin), executable)
-
-def copyandhash(instream, outfile):
- '''Read bytes from instream (iterable) and write them to outfile,
- computing the SHA-1 hash of the data along the way. Close outfile
- when done and return the binary hash.'''
- hasher = util.sha1('')
- for data in instream:
- hasher.update(data)
- outfile.write(data)
-
- # Blecch: closing a file that somebody else opened is rude and
- # wrong. But it's so darn convenient and practical! After all,
- # outfile was opened just to copy and hash.
- outfile.close()
-
- return hasher.digest()
-
-def hashrepofile(repo, file):
- return hashfile(repo.wjoin(file))
-
-def hashfile(file):
- if not os.path.exists(file):
- return ''
- hasher = util.sha1('')
- fd = open(file, 'rb')
- for data in blockstream(fd):
- hasher.update(data)
- fd.close()
- return hasher.hexdigest()
-
-class limitreader(object):
- def __init__(self, f, limit):
- self.f = f
- self.limit = limit
-
- def read(self, length):
- if self.limit == 0:
- return ''
- length = length > self.limit and self.limit or length
- self.limit -= length
- return self.f.read(length)
-
- def close(self):
- pass
-
-def blockstream(infile, blocksize=128 * 1024):
- """Generator that yields blocks of data from infile and closes infile."""
- while True:
- data = infile.read(blocksize)
- if not data:
- break
- yield data
- # same blecch as copyandhash() above
- infile.close()
-
-def writehash(hash, filename, executable):
- util.makedirs(os.path.dirname(filename))
- util.writefile(filename, hash + '\n')
- os.chmod(filename, getmode(executable))
-
-def getexecutable(filename):
- mode = os.stat(filename).st_mode
- return ((mode & stat.S_IXUSR) and
- (mode & stat.S_IXGRP) and
- (mode & stat.S_IXOTH))
-
-def getmode(executable):
- if executable:
- return 0755
- else:
- return 0644
-
-def urljoin(first, second, *arg):
- def join(left, right):
- if not left.endswith('/'):
- left += '/'
- if right.startswith('/'):
- right = right[1:]
- return left + right
-
- url = join(first, second)
- for a in arg:
- url = join(url, a)
- return url
-
-def hexsha1(data):
- """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
- object data"""
- h = util.sha1()
- for chunk in util.filechunkiter(data):
- h.update(chunk)
- return h.hexdigest()
-
-def httpsendfile(ui, filename):
- return httpconnection.httpsendfile(ui, filename, 'rb')
-
-def unixpath(path):
- '''Return a version of path normalized for use with the lfdirstate.'''
- return util.pconvert(os.path.normpath(path))
-
-def islfilesrepo(repo):
- return ('largefiles' in repo.requirements and
- util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))
-
-class storeprotonotcapable(Exception):
- def __init__(self, storetypes):
- self.storetypes = storetypes
-
-def getcurrentheads(repo):
- branches = repo.branchmap()
- heads = []
- for branch in branches:
- newheads = repo.branchheads(branch)
- heads = heads + newheads
- return heads
-
-def getstandinsstate(repo):
- standins = []
- matcher = getstandinmatcher(repo)
- for standin in dirstatewalk(repo.dirstate, matcher):
- lfile = splitstandin(standin)
- standins.append((lfile, readstandin(repo, lfile)))
- return standins
-
-def getlfilestoupdate(oldstandins, newstandins):
- changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
- filelist = []
- for f in changedstandins:
- if f[0] not in filelist:
- filelist.append(f[0])
- return filelist
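
getstandinsstate() and getlfilestoupdate() above implement a cheap before/after diff of (lfile, hash) pairs, so only largefiles whose standins changed get updated. Roughly:

old = [('a.bin', '11' * 20), ('b.bin', '22' * 20)]
new = [('a.bin', '11' * 20), ('b.bin', '33' * 20), ('c.bin', '44' * 20)]

# pairs present on only one side are the changed standins
changed = set(old).symmetric_difference(set(new))
filelist = sorted(set(f for f, h in changed))
assert filelist == ['b.bin', 'c.bin']
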
diff --git a/hgext/largefiles/localstore.py b/hgext/largefiles/localstore.py
deleted file mode 100644
index 4995743..0000000
--- a/hgext/largefiles/localstore.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''store class for local filesystem'''
-
-import os
-
-from mercurial import util
-from mercurial.i18n import _
-
-import lfutil
-import basestore
-
-class localstore(basestore.basestore):
- '''localstore first attempts to grab files out of the store in the remote
- Mercurial repository. Failing that, it attempts to grab the files from
- the user cache.'''
-
- def __init__(self, ui, repo, remote):
- url = os.path.join(remote.local().path, '.hg', lfutil.longname)
- super(localstore, self).__init__(ui, repo, util.expandpath(url))
- self.remote = remote.local()
-
- def put(self, source, hash):
- util.makedirs(os.path.dirname(lfutil.storepath(self.remote, hash)))
- if lfutil.instore(self.remote, hash):
- return
- lfutil.link(lfutil.storepath(self.repo, hash),
- lfutil.storepath(self.remote, hash))
-
- def exists(self, hash):
- return lfutil.instore(self.remote, hash)
-
- def _getfile(self, tmpfile, filename, hash):
- if lfutil.instore(self.remote, hash):
- path = lfutil.storepath(self.remote, hash)
- elif lfutil.inusercache(self.ui, hash):
- path = lfutil.usercachepath(self.ui, hash)
- else:
- raise basestore.StoreError(filename, hash, '',
- _("can't get file locally"))
- fd = open(path, 'rb')
- try:
- return lfutil.copyandhash(fd, tmpfile)
- finally:
- fd.close()
-
- def _verifyfile(self, cctx, cset, contents, standin, verified):
- filename = lfutil.splitstandin(standin)
- if not filename:
- return False
- fctx = cctx[standin]
- key = (filename, fctx.filenode())
- if key in verified:
- return False
-
- expecthash = fctx.data()[0:40]
- verified.add(key)
- if not lfutil.instore(self.remote, expecthash):
- self.ui.warn(
- _('changeset %s: %s missing\n'
- ' (looked for hash %s)\n')
- % (cset, filename, expecthash))
- return True # failed
-
- if contents:
- storepath = lfutil.storepath(self.remote, expecthash)
- actualhash = lfutil.hashfile(storepath)
- if actualhash != expecthash:
- self.ui.warn(
- _('changeset %s: %s: contents differ\n'
- ' (%s:\n'
- ' expected hash %s,\n'
- ' but got %s)\n')
- % (cset, filename, storepath, expecthash, actualhash))
- return True # failed
- return False
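
The _verifyfile() contract above boils down to: the first 40 bytes of the standin's data are the expected SHA-1, which must match the hash of the stored contents. A hedged standalone check, with hashlib in place of lfutil.hashfile:

import hashlib

def contents_match(standin_data, stored_data):
    expecthash = standin_data[0:40]
    return hashlib.sha1(stored_data).hexdigest() == expecthash

payload = b'big file contents'
standin = hashlib.sha1(payload).hexdigest() + '\n'
assert contents_match(standin, payload)
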
diff --git a/hgext/largefiles/overrides.py b/hgext/largefiles/overrides.py
deleted file mode 100644
index 3b42695..0000000
--- a/hgext/largefiles/overrides.py
+++ /dev/null
@@ -1,1080 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''Overridden Mercurial commands and functions for the largefiles extension'''
-
-import os
-import copy
-
-from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
- node, archival, error, merge
-from mercurial.i18n import _
-from mercurial.node import hex
-from hgext import rebase
-
-import lfutil
-import lfcommands
-
-# -- Utility functions: commonly/repeatedly needed functionality ---------------
-
-def installnormalfilesmatchfn(manifest):
- '''overrides scmutil.match so that the matcher it returns will ignore all
- largefiles'''
- oldmatch = None # for the closure
- def overridematch(ctx, pats=[], opts={}, globbed=False,
- default='relpath'):
- match = oldmatch(ctx, pats, opts, globbed, default)
- m = copy.copy(match)
- notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
- manifest)
- m._files = filter(notlfile, m._files)
- m._fmap = set(m._files)
- origmatchfn = m.matchfn
- m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
- return m
- oldmatch = installmatchfn(overridematch)
-
-def installmatchfn(f):
- oldmatch = scmutil.match
- setattr(f, 'oldmatch', oldmatch)
- scmutil.match = f
- return oldmatch
-
-def restorematchfn():
- '''restores scmutil.match to what it was before installnormalfilesmatchfn
- was called. no-op if scmutil.match is its original function.
-
- Note that n calls to installnormalfilesmatchfn will require n calls to
- restorematchfn to reverse.'''
- scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
-
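
The install/restore pair above relies on stashing the previous function as an attribute of its replacement, so n installs unwind with n restores. A toy sketch of the same pattern (a dict stands in for the scmutil module; none of these names are the extension's):

state = {}

def _originalmatch(f):
    return True

state['match'] = _originalmatch

def install(newfn):
    newfn.oldmatch = state['match']
    state['match'] = newfn

def restore():
    state['match'] = getattr(state['match'], 'oldmatch', state['match'])

def nolfiles(f):
    return not f.startswith('.hglf/')

install(nolfiles)
assert not state['match']('.hglf/big.bin')
restore()
assert state['match'] is _originalmatch
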
-def addlargefiles(ui, repo, *pats, **opts):
- large = opts.pop('large', None)
- lfsize = lfutil.getminsize(
- ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
-
- lfmatcher = None
- if lfutil.islfilesrepo(repo):
- lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
- if lfpats:
- lfmatcher = match_.match(repo.root, '', list(lfpats))
-
- lfnames = []
- m = scmutil.match(repo[None], pats, opts)
- m.bad = lambda x, y: None
- wctx = repo[None]
- for f in repo.walk(m):
- exact = m.exact(f)
- lfile = lfutil.standin(f) in wctx
- nfile = f in wctx
- exists = lfile or nfile
-
- # Don't warn the user when they attempt to add a normal tracked file.
- # The normal add code will do that for us.
- if exact and exists:
- if lfile:
- ui.warn(_('%s already a largefile\n') % f)
- continue
-
- if (exact or not exists) and not lfutil.isstandin(f):
- wfile = repo.wjoin(f)
-
- # In case the file was removed previously, but not committed
- # (issue3507)
- if not os.path.exists(wfile):
- continue
-
- abovemin = (lfsize and
- os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
- if large or abovemin or (lfmatcher and lfmatcher(f)):
- lfnames.append(f)
- if ui.verbose or not exact:
- ui.status(_('adding %s as a largefile\n') % m.rel(f))
-
- bad = []
- standins = []
-
- # Need to lock, otherwise there could be a race condition between
- # when standins are created and added to the repo.
- wlock = repo.wlock()
- try:
- if not opts.get('dry_run'):
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- for f in lfnames:
- standinname = lfutil.standin(f)
- lfutil.writestandin(repo, standinname, hash='',
- executable=lfutil.getexecutable(repo.wjoin(f)))
- standins.append(standinname)
- if lfdirstate[f] == 'r':
- lfdirstate.normallookup(f)
- else:
- lfdirstate.add(f)
- lfdirstate.write()
- bad += [lfutil.splitstandin(f)
- for f in lfutil.repoadd(repo, standins)
- if f in m.files()]
- finally:
- wlock.release()
- return bad
-
-def removelargefiles(ui, repo, *pats, **opts):
- after = opts.get('after')
- if not pats and not after:
- raise util.Abort(_('no files specified'))
- m = scmutil.match(repo[None], pats, opts)
- try:
- repo.lfstatus = True
- s = repo.status(match=m, clean=True)
- finally:
- repo.lfstatus = False
- manifest = repo[None].manifest()
- modified, added, deleted, clean = [[f for f in list
- if lfutil.standin(f) in manifest]
- for list in [s[0], s[1], s[3], s[6]]]
-
- def warn(files, reason):
- for f in files:
- ui.warn(_('not removing %s: %s (use forget to undo)\n')
- % (m.rel(f), reason))
-
- if after:
- remove, forget = deleted, []
- warn(modified + added + clean, _('file still exists'))
- else:
- remove, forget = deleted + clean, []
- warn(modified, _('file is modified'))
- warn(added, _('file has been marked for add'))
-
- for f in sorted(remove + forget):
- if ui.verbose or not m.exact(f):
- ui.status(_('removing %s\n') % m.rel(f))
-
- # Need to lock because standin files are deleted then removed from the
- # repository and we could race in between.
- wlock = repo.wlock()
- try:
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- for f in remove:
- if not after:
- # If this is being called by addremove, notify the user that we
- # are removing the file.
- if getattr(repo, "_isaddremove", False):
- ui.status(_('removing %s\n') % f)
- if os.path.exists(repo.wjoin(f)):
- util.unlinkpath(repo.wjoin(f))
- lfdirstate.remove(f)
- lfdirstate.write()
- forget = [lfutil.standin(f) for f in forget]
- remove = [lfutil.standin(f) for f in remove]
- lfutil.repoforget(repo, forget)
- # If this is being called by addremove, let the original addremove
- # function handle this.
- if not getattr(repo, "_isaddremove", False):
- lfutil.reporemove(repo, remove, unlink=True)
- else:
- lfutil.reporemove(repo, remove, unlink=False)
- finally:
- wlock.release()
-
-# For overriding mercurial.hgweb.webcommands so that largefiles will
-# appear at their right place in the manifests.
-def decodepath(orig, path):
- return lfutil.splitstandin(path) or path
-
-# -- Wrappers: modify existing commands --------------------------------
-
-# Add works by going through the files that the user wanted to add and
-# checking if they should be added as largefiles. Then it makes a new
-# matcher which matches only the normal files and runs the original
-# version of add.
-def overrideadd(orig, ui, repo, *pats, **opts):
- normal = opts.pop('normal')
- if normal:
- if opts.get('large'):
- raise util.Abort(_('--normal cannot be used with --large'))
- return orig(ui, repo, *pats, **opts)
- bad = addlargefiles(ui, repo, *pats, **opts)
- installnormalfilesmatchfn(repo[None].manifest())
- result = orig(ui, repo, *pats, **opts)
- restorematchfn()
-
- return (result == 1 or bad) and 1 or 0
-
-def overrideremove(orig, ui, repo, *pats, **opts):
- installnormalfilesmatchfn(repo[None].manifest())
- orig(ui, repo, *pats, **opts)
- restorematchfn()
- removelargefiles(ui, repo, *pats, **opts)
-
-def overridestatusfn(orig, repo, rev2, **opts):
- try:
- repo._repo.lfstatus = True
- return orig(repo, rev2, **opts)
- finally:
- repo._repo.lfstatus = False
-
-def overridestatus(orig, ui, repo, *pats, **opts):
- try:
- repo.lfstatus = True
- return orig(ui, repo, *pats, **opts)
- finally:
- repo.lfstatus = False
-
-def overridedirty(orig, repo, ignoreupdate=False):
- try:
- repo._repo.lfstatus = True
- return orig(repo, ignoreupdate)
- finally:
- repo._repo.lfstatus = False
-
-def overridelog(orig, ui, repo, *pats, **opts):
- try:
- repo.lfstatus = True
- orig(ui, repo, *pats, **opts)
- finally:
- repo.lfstatus = False
-
-def overrideverify(orig, ui, repo, *pats, **opts):
- large = opts.pop('large', False)
- all = opts.pop('lfa', False)
- contents = opts.pop('lfc', False)
-
- result = orig(ui, repo, *pats, **opts)
- if large:
- result = result or lfcommands.verifylfiles(ui, repo, all, contents)
- return result
-
-# Override needs to refresh standins so that update's normal merge
-# will go through properly. Then the other update hook (overriding repo.update)
-# will get the new files. Filemerge is also overridden so that the merge
-# will merge standins correctly.
-def overrideupdate(orig, ui, repo, *pats, **opts):
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
- False, False)
- (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
-
- # Need to lock between the standins getting updated and their
- # largefiles getting updated
- wlock = repo.wlock()
- try:
- if opts['check']:
- mod = len(modified) > 0
- for lfile in unsure:
- standin = lfutil.standin(lfile)
- if repo['.'][standin].data().strip() != \
- lfutil.hashfile(repo.wjoin(lfile)):
- mod = True
- else:
- lfdirstate.normal(lfile)
- lfdirstate.write()
- if mod:
- raise util.Abort(_('uncommitted local changes'))
- # XXX handle removed differently
- if not opts['clean']:
- for lfile in unsure + modified + added:
- lfutil.updatestandin(repo, lfutil.standin(lfile))
- finally:
- wlock.release()
- return orig(ui, repo, *pats, **opts)
-
-# Before starting the manifest merge, merge.updates will call
-# _checkunknown to check if there are any files in the merged-in
-# changeset that collide with unknown files in the working copy.
-#
-# The largefiles are seen as unknown, so this prevents us from merging
-# in a file 'foo' if we already have a largefile with the same name.
-#
-# The overridden function filters the unknown files by removing any
-# largefiles. This makes the merge proceed and we can then handle this
-# case further in the overridden manifestmerge function below.
-def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
- if lfutil.standin(f) in wctx:
- return False
- return origfn(repo, wctx, mctx, f)
-
-# The manifest merge handles conflicts on the manifest level. We want
-# to handle changes in largefile-ness of files at this level too.
-#
-# The strategy is to run the original manifestmerge and then process
-# the action list it outputs. There are two cases we need to deal with:
-#
-# 1. Normal file in p1, largefile in p2. Here the largefile is
-# detected via its standin file, which will enter the working copy
-# with a "get" action. It is not "merge" since the standin is all
-# Mercurial is concerned with at this level -- the link to the
-# existing normal file is not relevant here.
-#
-# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
-# since the largefile will be present in the working copy and
-# different from the normal file in p2. Mercurial therefore
-# triggers a merge action.
-#
-# In both cases, we prompt the user and emit new actions to either
-# remove the standin (if the normal file was kept) or to remove the
-# normal file and get the standin (if the largefile was kept). The
-# default prompt answer is to use the largefile version since it was
-# presumably changed on purpose.
-#
-# Finally, the merge.applyupdates function will then take care of
-# writing the files into the working copy and lfcommands.updatelfiles
-# will update the largefiles.
-def overridemanifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
- actions = origfn(repo, p1, p2, pa, overwrite, partial)
- processed = []
-
- for action in actions:
- if overwrite:
- processed.append(action)
- continue
- f, m = action[:2]
-
- choices = (_('&Largefile'), _('&Normal file'))
- if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
- # Case 1: normal file in the working copy, largefile in
- # the second parent
- lfile = lfutil.splitstandin(f)
- standin = f
- msg = _('%s has been turned into a largefile\n'
- 'use (l)argefile or keep as (n)ormal file?') % lfile
- if repo.ui.promptchoice(msg, choices, 0) == 0:
- processed.append((lfile, "r"))
- processed.append((standin, "g", p2.flags(standin)))
- else:
- processed.append((standin, "r"))
- elif m == "g" and lfutil.standin(f) in p1 and f in p2:
- # Case 2: largefile in the working copy, normal file in
- # the second parent
- standin = lfutil.standin(f)
- lfile = f
- msg = _('%s has been turned into a normal file\n'
- 'keep as (l)argefile or use (n)ormal file?') % lfile
- if repo.ui.promptchoice(msg, choices, 0) == 0:
- processed.append((lfile, "r"))
- else:
- processed.append((standin, "r"))
- processed.append((lfile, "g", p2.flags(lfile)))
- else:
- processed.append(action)
-
- return processed
-
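
Both cases above resolve the prompt into remove-one/get-the-other action pairs. A toy illustration of the actions emitted for case 1, using tuples that mimic the (file, action, ...) shape seen above rather than Mercurial's merge API:

def case1_actions(lfile, standin, standinflags, uselargefile):
    if uselargefile:
        # drop the normal file, fetch the standin from the other parent
        return [(lfile, 'r'), (standin, 'g', standinflags)]
    # keep the normal file: just drop the standin
    return [(standin, 'r')]

assert case1_actions('big.bin', '.hglf/big.bin', '', True) == \
    [('big.bin', 'r'), ('.hglf/big.bin', 'g', '')]
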
-# Override filemerge to prompt the user about how they wish to merge
-# largefiles. This will handle identical edits, and copy/rename +
-# edit without prompting the user.
-def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
- # Use better variable names here. Because this is a wrapper we cannot
- # change the variable names in the function declaration.
- fcdest, fcother, fcancestor = fcd, fco, fca
- if not lfutil.isstandin(orig):
- return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
- else:
- if not fcother.cmp(fcdest): # files identical?
- return None
-
- # backwards, use working dir parent as ancestor
- if fcancestor == fcother:
- fcancestor = fcdest.parents()[0]
-
- if orig != fcother.path():
- repo.ui.status(_('merging %s and %s to %s\n')
- % (lfutil.splitstandin(orig),
- lfutil.splitstandin(fcother.path()),
- lfutil.splitstandin(fcdest.path())))
- else:
- repo.ui.status(_('merging %s\n')
- % lfutil.splitstandin(fcdest.path()))
-
- if fcancestor.path() != fcother.path() and fcother.data() == \
- fcancestor.data():
- return 0
- if fcancestor.path() != fcdest.path() and fcdest.data() == \
- fcancestor.data():
- repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
- return 0
-
- if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
- 'keep (l)ocal or take (o)ther?') %
- lfutil.splitstandin(orig),
- (_('&Local'), _('&Other')), 0) == 0:
- return 0
- else:
- repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
- return 0
-
-# Copy first changes the matchers to match standins instead of
-# largefiles. Then it overrides util.copyfile in that function it
-# checks if the destination largefile already exists. It also keeps a
-# list of copied files so that the largefiles can be copied and the
-# dirstate updated.
-def overridecopy(orig, ui, repo, pats, opts, rename=False):
- # doesn't remove largefile on rename
- if len(pats) < 2:
- # this isn't legal, let the original function deal with it
- return orig(ui, repo, pats, opts, rename)
-
- def makestandin(relpath):
- path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
- return os.path.join(repo.wjoin(lfutil.standin(path)))
-
- fullpats = scmutil.expandpats(pats)
- dest = fullpats[-1]
-
- if os.path.isdir(dest):
- if not os.path.isdir(makestandin(dest)):
- os.makedirs(makestandin(dest))
- # This could copy both lfiles and normal files in one command,
- # but we don't want to do that. First replace the matcher to
- # only match normal files and run it, then replace it to just
- # match largefiles and run it again.
- nonormalfiles = False
- nolfiles = False
- try:
- try:
- installnormalfilesmatchfn(repo[None].manifest())
- result = orig(ui, repo, pats, opts, rename)
- except util.Abort, e:
- if str(e) != _('no files to copy'):
- raise e
- else:
- nonormalfiles = True
- result = 0
- finally:
- restorematchfn()
-
- # The first rename can cause our current working directory to be removed.
- # In that case there is nothing left to copy/rename so just quit.
- try:
- repo.getcwd()
- except OSError:
- return result
-
- try:
- try:
- # When we call orig below it creates the standins but we don't add
- # them to the dir state until later so lock during that time.
- wlock = repo.wlock()
-
- manifest = repo[None].manifest()
- oldmatch = None # for the closure
- def overridematch(ctx, pats=[], opts={}, globbed=False,
- default='relpath'):
- newpats = []
- # The patterns were previously mangled to add the standin
- # directory; we need to remove that now
- for pat in pats:
- if match_.patkind(pat) is None and lfutil.shortname in pat:
- newpats.append(pat.replace(lfutil.shortname, ''))
- else:
- newpats.append(pat)
- match = oldmatch(ctx, newpats, opts, globbed, default)
- m = copy.copy(match)
- lfile = lambda f: lfutil.standin(f) in manifest
- m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
- m._fmap = set(m._files)
- origmatchfn = m.matchfn
- m.matchfn = lambda f: (lfutil.isstandin(f) and
- (f in manifest) and
- origmatchfn(lfutil.splitstandin(f)) or
- None)
- return m
- oldmatch = installmatchfn(overridematch)
- listpats = []
- for pat in pats:
- if match_.patkind(pat) is not None:
- listpats.append(pat)
- else:
- listpats.append(makestandin(pat))
-
- try:
- origcopyfile = util.copyfile
- copiedfiles = []
- def overridecopyfile(src, dest):
- if (lfutil.shortname in src and
- dest.startswith(repo.wjoin(lfutil.shortname))):
- destlfile = dest.replace(lfutil.shortname, '')
- if not opts['force'] and os.path.exists(destlfile):
- raise IOError('',
- _('destination largefile already exists'))
- copiedfiles.append((src, dest))
- origcopyfile(src, dest)
-
- util.copyfile = overridecopyfile
- result += orig(ui, repo, listpats, opts, rename)
- finally:
- util.copyfile = origcopyfile
-
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- for (src, dest) in copiedfiles:
- if (lfutil.shortname in src and
- dest.startswith(repo.wjoin(lfutil.shortname))):
- srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
- destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
- destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
- if not os.path.isdir(destlfiledir):
- os.makedirs(destlfiledir)
- if rename:
- os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
- lfdirstate.remove(srclfile)
- else:
- util.copyfile(repo.wjoin(srclfile),
- repo.wjoin(destlfile))
-
- lfdirstate.add(destlfile)
- lfdirstate.write()
- except util.Abort, e:
- if str(e) != _('no files to copy'):
- raise e
- else:
- nolfiles = True
- finally:
- restorematchfn()
- wlock.release()
-
- if nolfiles and nonormalfiles:
- raise util.Abort(_('no files to copy'))
-
- return result
-
-# When the user calls revert, we have to be careful to not revert any
-# changes to other largefiles accidentally. This means we have to keep
-# track of the largefiles that are being reverted so we only pull down
-# the necessary largefiles.
-#
-# Standins are only updated (to match the hash of largefiles) before
-# commits. Update the standins then run the original revert, changing
-# the matcher to hit standins instead of largefiles. Based on the
-# resulting standins, update the largefiles. Then return the standins
-# to their proper state.
-def overriderevert(orig, ui, repo, *pats, **opts):
- # Because we put the standins in a bad state (by updating them)
- # and then return them to a correct state we need to lock to
- # prevent others from changing them in their incorrect state.
- wlock = repo.wlock()
- try:
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- (modified, added, removed, missing, unknown, ignored, clean) = \
- lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
- for lfile in modified:
- lfutil.updatestandin(repo, lfutil.standin(lfile))
- for lfile in missing:
- if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
- os.unlink(repo.wjoin(lfutil.standin(lfile)))
-
- try:
- ctx = scmutil.revsingle(repo, opts.get('rev'))
- oldmatch = None # for the closure
- def overridematch(ctx, pats=[], opts={}, globbed=False,
- default='relpath'):
- match = oldmatch(ctx, pats, opts, globbed, default)
- m = copy.copy(match)
- def tostandin(f):
- if lfutil.standin(f) in ctx:
- return lfutil.standin(f)
- elif lfutil.standin(f) in repo[None]:
- return None
- return f
- m._files = [tostandin(f) for f in m._files]
- m._files = [f for f in m._files if f is not None]
- m._fmap = set(m._files)
- origmatchfn = m.matchfn
- def matchfn(f):
- if lfutil.isstandin(f):
- # We need to keep track of what largefiles are being
- # matched so we know which ones to update later --
- # otherwise we accidentally revert changes to other
- # largefiles. This is repo-specific, so duckpunch the
- # repo object to keep the list of largefiles for us
- # later.
- if origmatchfn(lfutil.splitstandin(f)) and \
- (f in repo[None] or f in ctx):
- lfileslist = getattr(repo, '_lfilestoupdate', [])
- lfileslist.append(lfutil.splitstandin(f))
- repo._lfilestoupdate = lfileslist
- return True
- else:
- return False
- return origmatchfn(f)
- m.matchfn = matchfn
- return m
- oldmatch = installmatchfn(overridematch)
- scmutil.match
- matches = overridematch(repo[None], pats, opts)
- orig(ui, repo, *pats, **opts)
- finally:
- restorematchfn()
- lfileslist = getattr(repo, '_lfilestoupdate', [])
- lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
- printmessage=False)
-
- # empty out the largefiles list so we start fresh next time
- repo._lfilestoupdate = []
- for lfile in modified:
- if lfile in lfileslist:
- if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
- in repo['.']:
- lfutil.writestandin(repo, lfutil.standin(lfile),
- repo['.'][lfile].data().strip(),
- 'x' in repo['.'][lfile].flags())
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- for lfile in added:
- standin = lfutil.standin(lfile)
- if standin not in ctx and (standin in matches or opts.get('all')):
- if lfile in lfdirstate:
- lfdirstate.drop(lfile)
- util.unlinkpath(repo.wjoin(standin))
- lfdirstate.write()
- finally:
- wlock.release()
-
-def hgupdate(orig, repo, node):
- # Only call updatelfiles on the standins that have changed, to save time
- oldstandins = lfutil.getstandinsstate(repo)
- result = orig(repo, node)
- newstandins = lfutil.getstandinsstate(repo)
- filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
- lfcommands.updatelfiles(repo.ui, repo, filelist=filelist, printmessage=True)
- return result
-
-def hgclean(orig, repo, node, show_stats=True):
- result = orig(repo, node, show_stats)
- lfcommands.updatelfiles(repo.ui, repo)
- return result
-
-def hgmerge(orig, repo, node, force=None, remind=True):
- # Mark the repo as being in the middle of a merge, so that
- # updatelfiles() will know that it needs to trust the standins in
- # the working copy, not in the standins in the current node
- repo._ismerging = True
- try:
- result = orig(repo, node, force, remind)
- lfcommands.updatelfiles(repo.ui, repo)
- finally:
- repo._ismerging = False
- return result
-
-# When we rebase a repository with remotely changed largefiles, we need to
-# take some extra care so that the largefiles are correctly updated in the
-# working copy
-def overridepull(orig, ui, repo, source=None, **opts):
- revsprepull = len(repo)
- if opts.get('rebase', False):
- repo._isrebasing = True
- try:
- if opts.get('update'):
- del opts['update']
- ui.debug('--update and --rebase are not compatible, ignoring '
- 'the update flag\n')
- del opts['rebase']
- cmdutil.bailifchanged(repo)
- origpostincoming = commands.postincoming
- def _dummy(*args, **kwargs):
- pass
- commands.postincoming = _dummy
- repo.lfpullsource = source
- if not source:
- source = 'default'
- try:
- result = commands.pull(ui, repo, source, **opts)
- finally:
- commands.postincoming = origpostincoming
- revspostpull = len(repo)
- if revspostpull > revsprepull:
- result = result or rebase.rebase(ui, repo)
- finally:
- repo._isrebasing = False
- else:
- repo.lfpullsource = source
- if not source:
- source = 'default'
- oldheads = lfutil.getcurrentheads(repo)
- result = orig(ui, repo, source, **opts)
- # If we do not have the new largefiles for any new heads we pulled, we
- # will run into a problem later if we try to merge or rebase with one of
- # these heads, so cache the largefiles now directly into the system
- # cache.
- ui.status(_("caching new largefiles\n"))
- numcached = 0
- heads = lfutil.getcurrentheads(repo)
- newheads = set(heads).difference(set(oldheads))
- for head in newheads:
- (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
- numcached += len(cached)
- ui.status(_("%d largefiles cached\n") % numcached)
- if opts.get('all_largefiles'):
- revspostpull = len(repo)
- revs = []
- for rev in xrange(revsprepull + 1, revspostpull):
- revs.append(repo[rev].rev())
- lfcommands.downloadlfiles(ui, repo, revs)
- return result
-
-def overrideclone(orig, ui, source, dest=None, **opts):
- if dest is None:
- dest = hg.defaultdest(source)
- if opts.get('all_largefiles') and not hg.islocal(dest):
- raise util.Abort(
- _('--all-largefiles is incompatible with non-local destination %s')
- % dest)
- result = hg.clone(ui, opts, source, dest,
- pull=opts.get('pull'),
- stream=opts.get('uncompressed'),
- rev=opts.get('rev'),
- update=True, # required for successful walkchangerevs
- branch=opts.get('branch'))
- if result is None:
- return True
- if opts.get('all_largefiles'):
- sourcerepo, destrepo = result
- success, missing = lfcommands.downloadlfiles(ui, destrepo.local(), None)
- return missing != 0
- return result is None
-
-def overriderebase(orig, ui, repo, **opts):
- repo._isrebasing = True
- try:
- orig(ui, repo, **opts)
- finally:
- repo._isrebasing = False
-
-def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
- prefix=None, mtime=None, subrepos=None):
- # No need to lock because we are only reading history and
- # largefile caches, neither of which are modified.
- lfcommands.cachelfiles(repo.ui, repo, node)
-
- if kind not in archival.archivers:
- raise util.Abort(_("unknown archive type '%s'") % kind)
-
- ctx = repo[node]
-
- if kind == 'files':
- if prefix:
- raise util.Abort(
- _('cannot give prefix when archiving to files'))
- else:
- prefix = archival.tidyprefix(dest, kind, prefix)
-
- def write(name, mode, islink, getdata):
- if matchfn and not matchfn(name):
- return
- data = getdata()
- if decode:
- data = repo.wwritedata(name, data)
- archiver.addfile(prefix + name, mode, islink, data)
-
- archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
-
- if repo.ui.configbool("ui", "archivemeta", True):
- def metadata():
- base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
- hex(repo.changelog.node(0)), hex(node), ctx.branch())
-
- tags = ''.join('tag: %s\n' % t for t in ctx.tags()
- if repo.tagtype(t) == 'global')
- if not tags:
- repo.ui.pushbuffer()
- opts = {'template': '{latesttag}\n{latesttagdistance}',
- 'style': '', 'patch': None, 'git': None}
- cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
- ltags, dist = repo.ui.popbuffer().split('\n')
- tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
- tags += 'latesttagdistance: %s\n' % dist
-
- return base + tags
-
- write('.hg_archival.txt', 0644, False, metadata)
-
- for f in ctx:
- ff = ctx.flags(f)
- getdata = ctx[f].data
- if lfutil.isstandin(f):
- path = lfutil.findfile(repo, getdata().strip())
- if path is None:
- raise util.Abort(
- _('largefile %s not found in repo store or system cache')
- % lfutil.splitstandin(f))
- f = lfutil.splitstandin(f)
-
- def getdatafn():
- fd = None
- try:
- fd = open(path, 'rb')
- return fd.read()
- finally:
- if fd:
- fd.close()
-
- getdata = getdatafn
- write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
-
- if subrepos:
- for subpath in ctx.substate:
- sub = ctx.sub(subpath)
- submatch = match_.narrowmatcher(subpath, matchfn)
- sub.archive(repo.ui, archiver, prefix, submatch)
-
- archiver.done()
-
-def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
- rev = repo._state[1]
- ctx = repo._repo[rev]
-
- lfcommands.cachelfiles(ui, repo._repo, ctx.node())
-
- def write(name, mode, islink, getdata):
- # At this point, the standin has been replaced with the largefile name,
- # so the normal matcher works here without the lfutil variants.
- if match and not match(f):
- return
- data = getdata()
-
- archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
-
- for f in ctx:
- ff = ctx.flags(f)
- getdata = ctx[f].data
- if lfutil.isstandin(f):
- path = lfutil.findfile(repo._repo, getdata().strip())
- if path is None:
- raise util.Abort(
- _('largefile %s not found in repo store or system cache')
- % lfutil.splitstandin(f))
- f = lfutil.splitstandin(f)
-
- def getdatafn():
- fd = None
- try:
- fd = open(os.path.join(prefix, path), 'rb')
- return fd.read()
- finally:
- if fd:
- fd.close()
-
- getdata = getdatafn
-
- write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
-
- for subpath in ctx.substate:
- sub = ctx.sub(subpath)
- submatch = match_.narrowmatcher(subpath, match)
- sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
- submatch)
-
-# If a largefile is modified, the change is not reflected in its
-# standin until a commit. cmdutil.bailifchanged() raises an exception
-# if the repo has uncommitted changes. Wrap it to also check if
-# largefiles were changed. This is used by bisect and backout.
-def overridebailifchanged(orig, repo):
- orig(repo)
- repo.lfstatus = True
- modified, added, removed, deleted = repo.status()[:4]
- repo.lfstatus = False
- if modified or added or removed or deleted:
- raise util.Abort(_('outstanding uncommitted changes'))
-
-# Fetch doesn't use cmdutil.bailifchanged so override it to add the check
-def overridefetch(orig, ui, repo, *pats, **opts):
- repo.lfstatus = True
- modified, added, removed, deleted = repo.status()[:4]
- repo.lfstatus = False
- if modified or added or removed or deleted:
- raise util.Abort(_('outstanding uncommitted changes'))
- return orig(ui, repo, *pats, **opts)
-
-def overrideforget(orig, ui, repo, *pats, **opts):
- installnormalfilesmatchfn(repo[None].manifest())
- orig(ui, repo, *pats, **opts)
- restorematchfn()
- m = scmutil.match(repo[None], pats, opts)
-
- try:
- repo.lfstatus = True
- s = repo.status(match=m, clean=True)
- finally:
- repo.lfstatus = False
- forget = sorted(s[0] + s[1] + s[3] + s[6])
- forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
-
- for f in forget:
- if lfutil.standin(f) not in repo.dirstate and not \
- os.path.isdir(m.rel(lfutil.standin(f))):
- ui.warn(_('not removing %s: file is already untracked\n')
- % m.rel(f))
-
- for f in forget:
- if ui.verbose or not m.exact(f):
- ui.status(_('removing %s\n') % m.rel(f))
-
- # Need to lock because standin files are deleted then removed from the
- # repository and we could race in between.
- wlock = repo.wlock()
- try:
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- for f in forget:
- if lfdirstate[f] == 'a':
- lfdirstate.drop(f)
- else:
- lfdirstate.remove(f)
- lfdirstate.write()
- lfutil.reporemove(repo, [lfutil.standin(f) for f in forget],
- unlink=True)
- finally:
- wlock.release()
-
-def getoutgoinglfiles(ui, repo, dest=None, **opts):
- dest = ui.expandpath(dest or 'default-push', dest or 'default')
- dest, branches = hg.parseurl(dest, opts.get('branch'))
- revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
- if revs:
- revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
-
- try:
- remote = hg.peer(repo, opts, dest)
- except error.RepoError:
- return None
- o = lfutil.findoutgoing(repo, remote, False)
- if not o:
- return None
- o = repo.changelog.nodesbetween(o, revs)[0]
- if opts.get('newest_first'):
- o.reverse()
-
- toupload = set()
- for n in o:
- parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
- ctx = repo[n]
- files = set(ctx.files())
- if len(parents) == 2:
- mc = ctx.manifest()
- mp1 = ctx.parents()[0].manifest()
- mp2 = ctx.parents()[1].manifest()
- for f in mp1:
- if f not in mc:
- files.add(f)
- for f in mp2:
- if f not in mc:
- files.add(f)
- for f in mc:
- if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
- files.add(f)
- toupload = toupload.union(
- set([f for f in files if lfutil.isstandin(f) and f in ctx]))
- return toupload
-
-def overrideoutgoing(orig, ui, repo, dest=None, **opts):
- orig(ui, repo, dest, **opts)
-
- if opts.pop('large', None):
- toupload = getoutgoinglfiles(ui, repo, dest, **opts)
- if toupload is None:
- ui.status(_('largefiles: No remote repo\n'))
- else:
- ui.status(_('largefiles to upload:\n'))
- for file in toupload:
- ui.status(lfutil.splitstandin(file) + '\n')
- ui.status('\n')
-
-def overridesummary(orig, ui, repo, *pats, **opts):
- try:
- repo.lfstatus = True
- orig(ui, repo, *pats, **opts)
- finally:
- repo.lfstatus = False
-
- if opts.pop('large', None):
- toupload = getoutgoinglfiles(ui, repo, None, **opts)
- if toupload is None:
- ui.status(_('largefiles: No remote repo\n'))
- else:
- ui.status(_('largefiles: %d to upload\n') % len(toupload))
-
-def overrideaddremove(orig, ui, repo, *pats, **opts):
- if not lfutil.islfilesrepo(repo):
- return orig(ui, repo, *pats, **opts)
- # Get the list of missing largefiles so we can remove them
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
- False, False)
- (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
-
- # Call into the normal remove code, but leave the removal of the standin
- # to the original addremove. Monkey patching here makes sure we don't
- # remove the standin in the largefiles code, preventing a very confused
- # state later.
- if missing:
- m = [repo.wjoin(f) for f in missing]
- repo._isaddremove = True
- removelargefiles(ui, repo, *m, **opts)
- repo._isaddremove = False
- # Call into the normal add code, and any files that *should* be added as
- # largefiles will be
- addlargefiles(ui, repo, *pats, **opts)
- # Now that we've handled largefiles, hand off to the original addremove
- # function to take care of the rest. Make sure it doesn't do anything with
- # largefiles by installing a matcher that will ignore them.
- installnormalfilesmatchfn(repo[None].manifest())
- result = orig(ui, repo, *pats, **opts)
- restorematchfn()
- return result
-
-# Calling purge with --all will cause the largefiles to be deleted.
-# Override repo.status to prevent this from happening.
-def overridepurge(orig, ui, repo, *dirs, **opts):
- oldstatus = repo.status
- def overridestatus(node1='.', node2=None, match=None, ignored=False,
- clean=False, unknown=False, listsubrepos=False):
- r = oldstatus(node1, node2, match, ignored, clean, unknown,
- listsubrepos)
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- modified, added, removed, deleted, unknown, ignored, clean = r
- unknown = [f for f in unknown if lfdirstate[f] == '?']
- ignored = [f for f in ignored if lfdirstate[f] == '?']
- return modified, added, removed, deleted, unknown, ignored, clean
- repo.status = overridestatus
- orig(ui, repo, *dirs, **opts)
- repo.status = oldstatus
-
-def overriderollback(orig, ui, repo, **opts):
- result = orig(ui, repo, **opts)
- merge.update(repo, node=None, branchmerge=False, force=True,
- partial=lfutil.isstandin)
- wlock = repo.wlock()
- try:
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- lfiles = lfutil.listlfiles(repo)
- oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
- for file in lfiles:
- if file in oldlfiles:
- lfdirstate.normallookup(file)
- else:
- lfdirstate.add(file)
- lfdirstate.write()
- finally:
- wlock.release()
- return result
-
-def overridetransplant(orig, ui, repo, *revs, **opts):
- try:
- oldstandins = lfutil.getstandinsstate(repo)
- repo._istransplanting = True
- result = orig(ui, repo, *revs, **opts)
- newstandins = lfutil.getstandinsstate(repo)
- filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
- lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
- printmessage=True)
- finally:
- repo._istransplanting = False
- return result
-
-def overridecat(orig, ui, repo, file1, *pats, **opts):
- ctx = scmutil.revsingle(repo, opts.get('rev'))
- if not lfutil.standin(file1) in ctx:
- result = orig(ui, repo, file1, *pats, **opts)
- return result
- return lfcommands.catlfile(repo, file1, ctx.rev(), opts.get('output'))
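
The overrides above all share one shape: the wrapper receives the original
command as its first argument, toggles largefiles state around it, and
delegates. A minimal standalone sketch of that pattern, with a toy command
table standing in for Mercurial's (none of these names are Mercurial API)::

    # Sketch of the wrap-the-original pattern used throughout overrides.py.
    # The table and command here are hypothetical stand-ins.
    def wrap(table, name, wrapper):
        '''Replace table[name] so it calls wrapper(orig, ...).'''
        orig = table[name]
        def wrapped(*args, **kwargs):
            return wrapper(orig, *args, **kwargs)
        table[name] = wrapped
        return wrapped

    commands = {'status': lambda repo: repo['files']}

    def overridestatus(orig, repo):
        # set a flag, delegate, restore -- mirroring the lfstatus
        # toggling in the overrides above
        repo['lfstatus'] = True
        try:
            return orig(repo)
        finally:
            repo['lfstatus'] = False

    wrap(commands, 'status', overridestatus)
    repo = {'files': ['a.txt'], 'lfstatus': False}
    print(commands['status'](repo))   # ['a.txt'], lfstatus restored to False
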
diff --git a/hgext/largefiles/proto.py b/hgext/largefiles/proto.py
deleted file mode 100644
index de89e32..0000000
--- a/hgext/largefiles/proto.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# Copyright 2011 Fog Creek Software
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-import os
-import urllib2
-
-from mercurial import error, httppeer, util, wireproto
-from mercurial.wireproto import batchable, future
-from mercurial.i18n import _
-
-import lfutil
-
-LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
- '\n\nPlease enable it in your Mercurial config '
- 'file.\n')
-
-def putlfile(repo, proto, sha):
- '''Put a largefile into a repository's local store and into the
- user cache.'''
- proto.redirect()
-
- path = lfutil.storepath(repo, sha)
- util.makedirs(os.path.dirname(path))
- tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)
-
- try:
- try:
- proto.getfile(tmpfp)
- tmpfp._fp.seek(0)
- if sha != lfutil.hexsha1(tmpfp._fp):
- raise IOError(0, _('largefile contents do not match hash'))
- tmpfp.close()
- lfutil.linktousercache(repo, sha)
- except IOError, e:
- repo.ui.warn(_('largefiles: failed to put %s into store: %s') %
- (sha, e.strerror))
- return wireproto.pushres(1)
- finally:
- tmpfp.discard()
-
- return wireproto.pushres(0)
-
-def getlfile(repo, proto, sha):
- '''Retrieve a largefile from the repository-local cache or system
- cache.'''
- filename = lfutil.findfile(repo, sha)
- if not filename:
- raise util.Abort(_('requested largefile %s not present in cache') % sha)
- f = open(filename, 'rb')
- length = os.fstat(f.fileno())[6]
-
- # Since we can't set an HTTP content-length header here, and
- # Mercurial core provides no way to give the length of a streamres
- # (and reading the entire file into RAM would be ill-advised), we
- # just send the length on the first line of the response, like the
- # ssh proto does for string responses.
- def generator():
- yield '%d\n' % length
- for chunk in f:
- yield chunk
- return wireproto.streamres(generator())
-
-def statlfile(repo, proto, sha):
- '''Return '2\n' if the largefile is missing, '1\n' if it has a
- mismatched checksum, or '0\n' if it is in good condition'''
- filename = lfutil.findfile(repo, sha)
- if not filename:
- return '2\n'
- fd = None
- try:
- fd = open(filename, 'rb')
- return lfutil.hexsha1(fd) == sha and '0\n' or '1\n'
- finally:
- if fd:
- fd.close()
-
-def wirereposetup(ui, repo):
- class lfileswirerepository(repo.__class__):
- def putlfile(self, sha, fd):
- # unfortunately, httprepository._callpush tries to convert its
- # input file-like object into a bundle before sending it, so we
- # can't use it ...
- if issubclass(self.__class__, httppeer.httppeer):
- res = None
- try:
- res = self._call('putlfile', data=fd, sha=sha,
- headers={'content-type':'application/mercurial-0.1'})
- d, output = res.split('\n', 1)
- for l in output.splitlines(True):
- self.ui.warn(_('remote: '), l, '\n')
- return int(d)
- except (ValueError, urllib2.HTTPError):
- self.ui.warn(_('unexpected putlfile response: %s') % res)
- return 1
- # ... but we can't use sshrepository._call because the data=
- # argument won't get sent, and _callpush does exactly what we want
- # in this case: send the data straight through
- else:
- try:
- ret, output = self._callpush("putlfile", fd, sha=sha)
- if ret == "":
- raise error.ResponseError(_('putlfile failed:'),
- output)
- return int(ret)
- except IOError:
- return 1
- except ValueError:
- raise error.ResponseError(
- _('putlfile failed (unexpected response):'), ret)
-
- def getlfile(self, sha):
- stream = self._callstream("getlfile", sha=sha)
- length = stream.readline()
- try:
- length = int(length)
- except ValueError:
- self._abort(error.ResponseError(_("unexpected response:"),
- length))
- return (length, stream)
-
- @batchable
- def statlfile(self, sha):
- f = future()
- result = {'sha': sha}
- yield result, f
- try:
- yield int(f.value)
- except (ValueError, urllib2.HTTPError):
- # If the server returns anything but an integer followed by a
- # newline, it's not speaking our language; if we get
- # an HTTP error, we can't be sure the largefile is present;
- # either way, consider it missing.
- yield 2
-
- repo.__class__ = lfileswirerepository
-
-# advertise the largefiles=serve capability
-def capabilities(repo, proto):
- return capabilitiesorig(repo, proto) + ' largefiles=serve'
-
-# duplicate what Mercurial's new out-of-band errors mechanism does, because
-# clients old and new alike handle it well
-def webprotorefuseclient(self, message):
- self.req.header([('Content-Type', 'application/hg-error')])
- return message
-
-def sshprotorefuseclient(self, message):
- self.ui.write_err('%s\n-\n' % message)
- self.fout.write('\n')
- self.fout.flush()
-
- return ''
-
-def heads(repo, proto):
- if lfutil.islfilesrepo(repo):
- return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
- return wireproto.heads(repo, proto)
-
-def sshrepocallstream(self, cmd, **args):
- if cmd == 'heads' and self.capable('largefiles'):
- cmd = 'lheads'
- if cmd == 'batch' and self.capable('largefiles'):
- args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
- return ssholdcallstream(self, cmd, **args)
-
-def httprepocallstream(self, cmd, **args):
- if cmd == 'heads' and self.capable('largefiles'):
- cmd = 'lheads'
- if cmd == 'batch' and self.capable('largefiles'):
- args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
- return httpoldcallstream(self, cmd, **args)
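
As the comment in getlfile notes, the response must carry its own length on
the first line because the wire protocol gives no content-length for a
streamres. A self-contained sketch of that framing, using in-memory byte
streams rather than the real wire protocol::

    import io

    def frame(data):
        # server side: length line first, then the raw bytes, as in
        # getlfile's generator above
        return b'%d\n' % len(data) + data

    def read_framed(stream):
        # client side: parse the length line, then read exactly that
        # many bytes, as the wire repository's getlfile does
        length = int(stream.readline())
        payload = stream.read(length)
        if len(payload) != length:
            raise IOError('truncated largefile stream')
        return payload

    wire = io.BytesIO(frame(b'largefile contents'))
    assert read_framed(wire) == b'largefile contents'
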
diff --git a/hgext/largefiles/remotestore.py b/hgext/largefiles/remotestore.py
deleted file mode 100644
index 6c3d371..0000000
--- a/hgext/largefiles/remotestore.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''remote largefile store; the base class for servestore'''
-
-import urllib2
-
-from mercurial import util
-from mercurial.i18n import _
-from mercurial.wireproto import remotebatch
-
-import lfutil
-import basestore
-
-class remotestore(basestore.basestore):
- '''a largefile store accessed over a network'''
- def __init__(self, ui, repo, url):
- super(remotestore, self).__init__(ui, repo, url)
-
- def put(self, source, hash):
- if self.sendfile(source, hash):
- raise util.Abort(
- _('remotestore: could not put %s to remote store %s')
- % (source, self.url))
- self.ui.debug(
- _('remotestore: put %s to remote store %s') % (source, self.url))
-
- def exists(self, hashes):
- return self._verify(hashes)
-
- def sendfile(self, filename, hash):
- self.ui.debug('remotestore: sendfile(%s, %s)\n' % (filename, hash))
- fd = None
- try:
- try:
- fd = lfutil.httpsendfile(self.ui, filename)
- except IOError, e:
- raise util.Abort(
- _('remotestore: could not open file %s: %s')
- % (filename, str(e)))
- return self._put(hash, fd)
- finally:
- if fd:
- fd.close()
-
- def _getfile(self, tmpfile, filename, hash):
- # quit if the largefile isn't there
- stat = self._stat(hash)
- if stat == 1:
- raise util.Abort(_('remotestore: largefile %s is invalid') % hash)
- elif stat == 2:
- raise util.Abort(_('remotestore: largefile %s is missing') % hash)
-
- try:
- length, infile = self._get(hash)
- except urllib2.HTTPError, e:
- # 401s get converted to util.Aborts; everything else is fine being
- # turned into a StoreError
- raise basestore.StoreError(filename, hash, self.url, str(e))
- except urllib2.URLError, e:
- # This usually indicates a connection problem, so don't
- # keep trying with the other files... they will probably
- # all fail too.
- raise util.Abort('%s: %s' % (self.url, e.reason))
- except IOError, e:
- raise basestore.StoreError(filename, hash, self.url, str(e))
-
- # Mercurial does not close its SSH connections after writing a stream
- if length is not None:
- infile = lfutil.limitreader(infile, length)
- return lfutil.copyandhash(lfutil.blockstream(infile), tmpfile)
-
- def _verify(self, hashes):
- return self._stat(hashes)
-
- def _verifyfile(self, cctx, cset, contents, standin, verified):
- filename = lfutil.splitstandin(standin)
- if not filename:
- return False
- fctx = cctx[standin]
- key = (filename, fctx.filenode())
- if key in verified:
- return False
-
- verified.add(key)
-
- stat = self._stat(hash)
- if not stat:
- return False
- elif stat == 1:
- self.ui.warn(
- _('changeset %s: %s: contents differ\n')
- % (cset, filename))
- return True # failed
- elif stat == 2:
- self.ui.warn(
- _('changeset %s: %s missing\n')
- % (cset, filename))
- return True # failed
- else:
- raise RuntimeError('verify failed: unexpected response from '
- 'statlfile (%r)' % stat)
-
- def batch(self):
- '''Support for remote batching.'''
- return remotebatch(self)
-
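
The 0/1/2 result codes returned by statlfile drive both _getfile and
_verifyfile above. A toy sketch of that convention, with a plain dict and
hashlib standing in for the store and lfutil's hashing helpers::

    import hashlib

    STAT_OK, STAT_CORRUPT, STAT_MISSING = 0, 1, 2

    def statlfile(store, sha):
        # the same three-way convention the remote store interprets
        data = store.get(sha)
        if data is None:
            return STAT_MISSING
        if hashlib.sha1(data).hexdigest() != sha:
            return STAT_CORRUPT
        return STAT_OK

    good = b'hello'
    store = {hashlib.sha1(good).hexdigest(): good,   # intact
             '0' * 40: b'oops'}                      # corrupted
    for sha in sorted(store):
        print(sha[:8], statlfile(store, sha))
    print(statlfile(store, 'f' * 40))                # 2: missing
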
diff --git a/hgext/largefiles/reposetup.py b/hgext/largefiles/reposetup.py
deleted file mode 100644
index 04ab704..0000000
--- a/hgext/largefiles/reposetup.py
+++ /dev/null
@@ -1,475 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''setup for largefiles repositories: reposetup'''
-import copy
-import types
-import os
-
-from mercurial import context, error, manifest, match as match_, util
-from mercurial import node as node_
-from mercurial.i18n import _
-
-import lfcommands
-import proto
-import lfutil
-
-def reposetup(ui, repo):
- # wire repositories should be given new wireproto functions but not the
- # other largefiles modifications
- if not repo.local():
- return proto.wirereposetup(ui, repo)
-
- for name in ('status', 'commitctx', 'commit', 'push'):
- method = getattr(repo, name)
- if (isinstance(method, types.FunctionType) and
- method.func_name == 'wrap'):
- ui.warn(_('largefiles: repo method %r appears to have already been'
- ' wrapped by another extension: '
- 'largefiles may behave incorrectly\n')
- % name)
-
- class lfilesrepo(repo.__class__):
- lfstatus = False
- def status_nolfiles(self, *args, **kwargs):
- return super(lfilesrepo, self).status(*args, **kwargs)
-
- # When lfstatus is set, return a context that gives the names
- # of largefiles instead of their corresponding standins and
- # identifies the largefiles as always binary, regardless of
- # their actual contents.
- def __getitem__(self, changeid):
- ctx = super(lfilesrepo, self).__getitem__(changeid)
- if self.lfstatus:
- class lfilesmanifestdict(manifest.manifestdict):
- def __contains__(self, filename):
- if super(lfilesmanifestdict,
- self).__contains__(filename):
- return True
- return super(lfilesmanifestdict,
- self).__contains__(lfutil.standin(filename))
- class lfilesctx(ctx.__class__):
- def files(self):
- filenames = super(lfilesctx, self).files()
- return [lfutil.splitstandin(f) or f for f in filenames]
- def manifest(self):
- man1 = super(lfilesctx, self).manifest()
- man1.__class__ = lfilesmanifestdict
- return man1
- def filectx(self, path, fileid=None, filelog=None):
- try:
- if filelog is not None:
- result = super(lfilesctx, self).filectx(
- path, fileid, filelog)
- else:
- result = super(lfilesctx, self).filectx(
- path, fileid)
- except error.LookupError:
- # Adding a null character will cause Mercurial to
- # identify this as a binary file.
- if filelog is not None:
- result = super(lfilesctx, self).filectx(
- lfutil.standin(path), fileid, filelog)
- else:
- result = super(lfilesctx, self).filectx(
- lfutil.standin(path), fileid)
- olddata = result.data
- result.data = lambda: olddata() + '\0'
- return result
- ctx.__class__ = lfilesctx
- return ctx
-
- # Figure out the status of big files and insert them into the
- # appropriate list in the result. Also removes standin files
- # from the listing. Revert to the original status if
- # self.lfstatus is False.
- def status(self, node1='.', node2=None, match=None, ignored=False,
- clean=False, unknown=False, listsubrepos=False):
- listignored, listclean, listunknown = ignored, clean, unknown
- if not self.lfstatus:
- return super(lfilesrepo, self).status(node1, node2, match,
- listignored, listclean, listunknown, listsubrepos)
- else:
- # some calls in this function rely on the old version of status
- self.lfstatus = False
- if isinstance(node1, context.changectx):
- ctx1 = node1
- else:
- ctx1 = repo[node1]
- if isinstance(node2, context.changectx):
- ctx2 = node2
- else:
- ctx2 = repo[node2]
- working = ctx2.rev() is None
- parentworking = working and ctx1 == self['.']
-
- def inctx(file, ctx):
- try:
- if ctx.rev() is None:
- return file in ctx.manifest()
- ctx[file]
- return True
- except KeyError:
- return False
-
- if match is None:
- match = match_.always(self.root, self.getcwd())
-
- # First check if there were files specified on the
- # command line. If there were, and none of them were
- # largefiles, we should just bail here and let super
- # handle it -- thus gaining a big performance boost.
- lfdirstate = lfutil.openlfdirstate(ui, self)
- if match.files() and not match.anypats():
- for f in lfdirstate:
- if match(f):
- break
- else:
- return super(lfilesrepo, self).status(node1, node2,
- match, listignored, listclean,
- listunknown, listsubrepos)
-
- # Create a copy of match that matches standins instead
- # of largefiles.
- def tostandins(files):
- if not working:
- return files
- newfiles = []
- dirstate = repo.dirstate
- for f in files:
- sf = lfutil.standin(f)
- if sf in dirstate:
- newfiles.append(sf)
- elif sf in dirstate.dirs():
- # Directory entries could be regular or
- # standin, check both
- newfiles.extend((f, sf))
- else:
- newfiles.append(f)
- return newfiles
-
- # Create a function that we can use to override what is
- # normally the ignore matcher. We've already checked
- # for ignored files on the first dirstate walk, and
- # unnecessarily re-checking here causes a huge performance
- # hit because lfdirstate only knows about largefiles
- def _ignoreoverride(self):
- return False
-
- m = copy.copy(match)
- m._files = tostandins(m._files)
-
- # Get ignored files here even if we weren't asked for them; we
- # must use the result here for filtering later
- result = super(lfilesrepo, self).status(node1, node2, m,
- True, clean, unknown, listsubrepos)
- if working:
- try:
- # Any non-largefiles that were explicitly listed must be
- # taken out or lfdirstate.status will report an error.
- # The status of these files was already computed using
- # super's status.
- # Override lfdirstate's ignore matcher to not do
- # anything
- origignore = lfdirstate._ignore
- lfdirstate._ignore = _ignoreoverride
-
- def sfindirstate(f):
- sf = lfutil.standin(f)
- dirstate = repo.dirstate
- return sf in dirstate or sf in dirstate.dirs()
- match._files = [f for f in match._files
- if sfindirstate(f)]
- # Don't waste time getting the ignored and unknown
- # files again; we already have them
- s = lfdirstate.status(match, [], False,
- listclean, False)
- (unsure, modified, added, removed, missing, unknown,
- ignored, clean) = s
- # Replace the list of ignored and unknown files with
- # the previously calculated lists, and strip out the
- # largefiles
- lfiles = set(lfdirstate._map)
- ignored = set(result[5]).difference(lfiles)
- unknown = set(result[4]).difference(lfiles)
- if parentworking:
- for lfile in unsure:
- standin = lfutil.standin(lfile)
- if standin not in ctx1:
- # from second parent
- modified.append(lfile)
- elif ctx1[standin].data().strip() \
- != lfutil.hashfile(self.wjoin(lfile)):
- modified.append(lfile)
- else:
- clean.append(lfile)
- lfdirstate.normal(lfile)
- else:
- tocheck = unsure + modified + added + clean
- modified, added, clean = [], [], []
-
- for lfile in tocheck:
- standin = lfutil.standin(lfile)
- if inctx(standin, ctx1):
- if ctx1[standin].data().strip() != \
- lfutil.hashfile(self.wjoin(lfile)):
- modified.append(lfile)
- else:
- clean.append(lfile)
- else:
- added.append(lfile)
- finally:
- # Replace the original ignore function
- lfdirstate._ignore = origignore
-
- for standin in ctx1.manifest():
- if not lfutil.isstandin(standin):
- continue
- lfile = lfutil.splitstandin(standin)
- if not match(lfile):
- continue
- if lfile not in lfdirstate:
- removed.append(lfile)
-
- # Filter result lists
- result = list(result)
-
- # Largefiles are not really removed when they're
- # still in the normal dirstate. Likewise, normal
- # files are not really removed if they're still in
- # lfdirstate. This happens in merges where files
- # change type.
- removed = [f for f in removed if f not in repo.dirstate]
- result[2] = [f for f in result[2] if f not in lfdirstate]
-
- # Unknown files
- unknown = set(unknown).difference(ignored)
- result[4] = [f for f in unknown
- if (repo.dirstate[f] == '?' and
- not lfutil.isstandin(f))]
- # Ignored files were calculated earlier by the dirstate,
- # and we already stripped out the largefiles from the list
- result[5] = ignored
- # combine normal files and largefiles
- normals = [[fn for fn in filelist
- if not lfutil.isstandin(fn)]
- for filelist in result]
- lfiles = (modified, added, removed, missing, [], [], clean)
- result = [sorted(list1 + list2)
- for (list1, list2) in zip(normals, lfiles)]
- else:
- def toname(f):
- if lfutil.isstandin(f):
- return lfutil.splitstandin(f)
- return f
- result = [[toname(f) for f in items] for items in result]
-
- if not listunknown:
- result[4] = []
- if not listignored:
- result[5] = []
- if not listclean:
- result[6] = []
- self.lfstatus = True
- return result
-
- # As part of committing, copy all of the largefiles into the
- # cache.
- def commitctx(self, *args, **kwargs):
- node = super(lfilesrepo, self).commitctx(*args, **kwargs)
- lfutil.copyalltostore(self, node)
- return node
-
- # Before commit, largefile standins have not had their
- # contents updated to reflect the hash of their largefile.
- # Do that here.
- def commit(self, text="", user=None, date=None, match=None,
- force=False, editor=False, extra={}):
- orig = super(lfilesrepo, self).commit
-
- wlock = repo.wlock()
- try:
- # Case 0: Rebase or Transplant
- # We have to take the time to pull down the new largefiles now.
- # Otherwise, any largefiles that were modified in the
- # destination changesets get overwritten, either by the rebase
- # or in the first commit after the rebase or transplant.
- # updatelfiles will update the dirstate to mark any pulled
- # largefiles as modified
- if getattr(repo, "_isrebasing", False) or \
- getattr(repo, "_istransplanting", False):
- lfcommands.updatelfiles(repo.ui, repo, filelist=None,
- printmessage=False)
- result = orig(text=text, user=user, date=date, match=match,
- force=force, editor=editor, extra=extra)
- return result
- # Case 1: user calls commit with no specific files or
- # include/exclude patterns: refresh and commit all files that
- # are "dirty".
- if ((match is None) or
- (not match.anypats() and not match.files())):
- # Spend a bit of time here to get a list of files we know
- # are modified so we can compare only against those.
- # It can cost a lot of time (several seconds)
- # otherwise to update all standins if the largefiles are
- # large.
- lfdirstate = lfutil.openlfdirstate(ui, self)
- dirtymatch = match_.always(repo.root, repo.getcwd())
- s = lfdirstate.status(dirtymatch, [], False, False, False)
- modifiedfiles = []
- for i in s:
- modifiedfiles.extend(i)
- lfiles = lfutil.listlfiles(self)
- # this only loops through largefiles that exist (not
- # removed/renamed)
- for lfile in lfiles:
- if lfile in modifiedfiles:
- if os.path.exists(
- self.wjoin(lfutil.standin(lfile))):
- # this handles the case where a rebase is being
- # performed and the working copy is not updated
- # yet.
- if os.path.exists(self.wjoin(lfile)):
- lfutil.updatestandin(self,
- lfutil.standin(lfile))
- lfdirstate.normal(lfile)
-
- result = orig(text=text, user=user, date=date, match=match,
- force=force, editor=editor, extra=extra)
-
- if result is not None:
- for lfile in lfdirstate:
- if lfile in modifiedfiles:
- if (not os.path.exists(repo.wjoin(
- lfutil.standin(lfile)))) or \
- (not os.path.exists(repo.wjoin(lfile))):
- lfdirstate.drop(lfile)
-
- # This needs to be after commit; otherwise precommit hooks
- # get the wrong status
- lfdirstate.write()
- return result
-
- for f in match.files():
- if lfutil.isstandin(f):
- raise util.Abort(
- _('file "%s" is a largefile standin') % f,
- hint=('commit the largefile itself instead'))
-
- # Case 2: user calls commit with specified patterns: refresh
- # any matching big files.
- smatcher = lfutil.composestandinmatcher(self, match)
- standins = lfutil.dirstatewalk(self.dirstate, smatcher)
-
- # No matching big files: get out of the way and pass control to
- # the usual commit() method.
- if not standins:
- return orig(text=text, user=user, date=date, match=match,
- force=force, editor=editor, extra=extra)
-
- # Refresh all matching big files. It's possible that the
- # commit will end up failing, in which case the big files will
- # stay refreshed. No harm done: the user modified them and
- # asked to commit them, so sooner or later we're going to
- # refresh the standins. Might as well leave them refreshed.
- lfdirstate = lfutil.openlfdirstate(ui, self)
- for standin in standins:
- lfile = lfutil.splitstandin(standin)
- if lfdirstate[lfile] <> 'r':
- lfutil.updatestandin(self, standin)
- lfdirstate.normal(lfile)
- else:
- lfdirstate.drop(lfile)
-
- # Cook up a new matcher that only matches regular files or
- # standins corresponding to the big files requested by the
- # user. Have to modify _files to prevent commit() from
- # complaining "not tracked" for big files.
- lfiles = lfutil.listlfiles(repo)
- match = copy.copy(match)
- origmatchfn = match.matchfn
-
- # Check both the list of largefiles and the list of
- # standins because if a largefile was removed, it
- # won't be in the list of largefiles at this point
- match._files += sorted(standins)
-
- actualfiles = []
- for f in match._files:
- fstandin = lfutil.standin(f)
-
- # ignore known largefiles and standins
- if f in lfiles or fstandin in standins:
- continue
-
- # append directory separator to avoid collisions
- if not fstandin.endswith(os.sep):
- fstandin += os.sep
-
- actualfiles.append(f)
- match._files = actualfiles
-
- def matchfn(f):
- if origmatchfn(f):
- return f not in lfiles
- else:
- return f in standins
-
- match.matchfn = matchfn
- result = orig(text=text, user=user, date=date, match=match,
- force=force, editor=editor, extra=extra)
- # This needs to be after commit; otherwise precommit hooks
- # get the wrong status
- lfdirstate.write()
- return result
- finally:
- wlock.release()
-
- def push(self, remote, force=False, revs=None, newbranch=False):
- o = lfutil.findoutgoing(repo, remote, force)
- if o:
- toupload = set()
- o = repo.changelog.nodesbetween(o, revs)[0]
- for n in o:
- parents = [p for p in repo.changelog.parents(n)
- if p != node_.nullid]
- ctx = repo[n]
- files = set(ctx.files())
- if len(parents) == 2:
- mc = ctx.manifest()
- mp1 = ctx.parents()[0].manifest()
- mp2 = ctx.parents()[1].manifest()
- for f in mp1:
- if f not in mc:
- files.add(f)
- for f in mp2:
- if f not in mc:
- files.add(f)
- for f in mc:
- if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
- None):
- files.add(f)
-
- toupload = toupload.union(
- set([ctx[f].data().strip()
- for f in files
- if lfutil.isstandin(f) and f in ctx]))
- lfcommands.uploadlfiles(ui, self, remote, toupload)
- return super(lfilesrepo, self).push(remote, force, revs,
- newbranch)
-
- repo.__class__ = lfilesrepo
-
- def checkrequireslfiles(ui, repo, **kwargs):
- if 'largefiles' not in repo.requirements and util.any(
- lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
- repo.requirements.add('largefiles')
- repo._writerequirements()
-
- ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
- ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
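
reposetup extends a live repository object by deriving a subclass from its
current class and reassigning __class__, so super() still reaches whatever
behaviour other extensions installed first. A minimal sketch of that
technique with toy classes (nothing here is Mercurial's API)::

    class repo(object):
        def status(self):
            return ['normal.txt']

    def reposetup(r):
        # derive from the *runtime* class, then swap it in
        class lfilesrepo(r.__class__):
            lfstatus = False
            def status(self):
                files = super(lfilesrepo, self).status()
                if self.lfstatus:
                    # pretend to translate standins into largefile names
                    files = files + ['big.bin']
                return files
        r.__class__ = lfilesrepo

    r = repo()
    reposetup(r)
    print(r.status())      # ['normal.txt']
    r.lfstatus = True
    print(r.status())      # ['normal.txt', 'big.bin']
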
diff --git a/hgext/largefiles/uisetup.py b/hgext/largefiles/uisetup.py
deleted file mode 100644
index e50190b..0000000
--- a/hgext/largefiles/uisetup.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''setup for largefiles extension: uisetup'''
-
-from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
- httppeer, localrepo, merge, sshpeer, sshserver, wireproto
-from mercurial.i18n import _
-from mercurial.hgweb import hgweb_mod, protocol, webcommands
-from mercurial.subrepo import hgsubrepo
-
-import overrides
-import proto
-
-def uisetup(ui):
- # Disable auto-status for some commands which assume that all
- # files in the result are under Mercurial's control
-
- entry = extensions.wrapcommand(commands.table, 'add',
- overrides.overrideadd)
- addopt = [('', 'large', None, _('add as largefile')),
- ('', 'normal', None, _('add as normal file')),
- ('', 'lfsize', '', _('add all files above this size '
- '(in megabytes) as largefiles '
- '(default: 10)'))]
- entry[1].extend(addopt)
-
- entry = extensions.wrapcommand(commands.table, 'addremove',
- overrides.overrideaddremove)
- entry = extensions.wrapcommand(commands.table, 'remove',
- overrides.overrideremove)
- entry = extensions.wrapcommand(commands.table, 'forget',
- overrides.overrideforget)
-
- # Subrepos call status function
- entry = extensions.wrapcommand(commands.table, 'status',
- overrides.overridestatus)
- entry = extensions.wrapfunction(hgsubrepo, 'status',
- overrides.overridestatusfn)
-
- entry = extensions.wrapcommand(commands.table, 'log',
- overrides.overridelog)
- entry = extensions.wrapcommand(commands.table, 'rollback',
- overrides.overriderollback)
- entry = extensions.wrapcommand(commands.table, 'verify',
- overrides.overrideverify)
-
- verifyopt = [('', 'large', None, _('verify largefiles')),
- ('', 'lfa', None,
- _('verify all revisions of largefiles not just current')),
- ('', 'lfc', None,
- _('verify largefile contents not just existence'))]
- entry[1].extend(verifyopt)
-
- entry = extensions.wrapcommand(commands.table, 'outgoing',
- overrides.overrideoutgoing)
- outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
- entry[1].extend(outgoingopt)
- entry = extensions.wrapcommand(commands.table, 'summary',
- overrides.overridesummary)
- summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
- entry[1].extend(summaryopt)
-
- entry = extensions.wrapcommand(commands.table, 'update',
- overrides.overrideupdate)
- entry = extensions.wrapcommand(commands.table, 'pull',
- overrides.overridepull)
- pullopt = [('', 'all-largefiles', None,
- _('download all pulled versions of largefiles'))]
- entry[1].extend(pullopt)
- entry = extensions.wrapcommand(commands.table, 'clone',
- overrides.overrideclone)
- cloneopt = [('', 'all-largefiles', None,
- _('download all versions of all largefiles'))]
-
- entry[1].extend(cloneopt)
- entry = extensions.wrapcommand(commands.table, 'cat',
- overrides.overridecat)
- entry = extensions.wrapfunction(merge, '_checkunknownfile',
- overrides.overridecheckunknownfile)
- entry = extensions.wrapfunction(merge, 'manifestmerge',
- overrides.overridemanifestmerge)
- entry = extensions.wrapfunction(filemerge, 'filemerge',
- overrides.overridefilemerge)
- entry = extensions.wrapfunction(cmdutil, 'copy',
- overrides.overridecopy)
-
- # Summary calls dirty on the subrepos
- entry = extensions.wrapfunction(hgsubrepo, 'dirty',
- overrides.overridedirty)
-
- # Backout calls revert so we need to override both the command and the
- # function
- entry = extensions.wrapcommand(commands.table, 'revert',
- overrides.overriderevert)
- entry = extensions.wrapfunction(commands, 'revert',
- overrides.overriderevert)
-
- # clone uses hg._update instead of hg.update even though they are the
- # same function... so wrap both of them
- extensions.wrapfunction(hg, 'update', overrides.hgupdate)
- extensions.wrapfunction(hg, '_update', overrides.hgupdate)
- extensions.wrapfunction(hg, 'clean', overrides.hgclean)
- extensions.wrapfunction(hg, 'merge', overrides.hgmerge)
-
- extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
- extensions.wrapfunction(hgsubrepo, 'archive', overrides.hgsubrepoarchive)
- extensions.wrapfunction(cmdutil, 'bailifchanged',
- overrides.overridebailifchanged)
-
- # create the new wireproto commands ...
- wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
- wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
- wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
-
- # ... and wrap some existing ones
- wireproto.commands['capabilities'] = (proto.capabilities, '')
- wireproto.commands['heads'] = (proto.heads, '')
- wireproto.commands['lheads'] = (wireproto.heads, '')
-
- # make putlfile behave the same as push and {get,stat}lfile behave
- # the same as pull w.r.t. permissions checks
- hgweb_mod.perms['putlfile'] = 'push'
- hgweb_mod.perms['getlfile'] = 'pull'
- hgweb_mod.perms['statlfile'] = 'pull'
-
- extensions.wrapfunction(webcommands, 'decodepath', overrides.decodepath)
-
- # the hello wireproto command uses wireproto.capabilities, so it won't see
- # our largefiles capability unless we replace the actual function as well.
- proto.capabilitiesorig = wireproto.capabilities
- wireproto.capabilities = proto.capabilities
-
- # these let us reject non-largefiles clients and make them display
- # our error messages
- protocol.webproto.refuseclient = proto.webprotorefuseclient
- sshserver.sshserver.refuseclient = proto.sshprotorefuseclient
-
- # can't do this in reposetup because it needs to have happened before
- # wirerepo.__init__ is called
- proto.ssholdcallstream = sshpeer.sshpeer._callstream
- proto.httpoldcallstream = httppeer.httppeer._callstream
- sshpeer.sshpeer._callstream = proto.sshrepocallstream
- httppeer.httppeer._callstream = proto.httprepocallstream
-
- # don't die on seeing a repo with the largefiles requirement
- localrepo.localrepository.supported |= set(['largefiles'])
-
- # override some extensions' stuff as well
- for name, module in extensions.extensions():
- if name == 'fetch':
- extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
- overrides.overridefetch)
- if name == 'purge':
- extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
- overrides.overridepurge)
- if name == 'rebase':
- extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
- overrides.overriderebase)
- if name == 'transplant':
- extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
- overrides.overridetransplant)
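
uisetup leans on extensions.wrapcommand both to interpose a wrapper and to
extend the wrapped command's option table via entry[1].extend(...). A toy
sketch of a command table holding [function, options] entries; the helper
names are hypothetical stand-ins, not Mercurial's::

    # name -> [function, options], loosely shaped like commands.table
    table = {'add': [lambda opts: sorted(opts), []]}

    def wrapcommand(table, name, wrapper):
        entry = table[name]
        orig = entry[0]
        entry[0] = lambda opts: wrapper(orig, opts)
        return entry            # caller may extend entry[1] with options

    def overrideadd(orig, opts):
        if opts.get('large'):
            print('adding as largefile')
        return orig(opts)

    entry = wrapcommand(table, 'add', overrideadd)
    entry[1].extend([('', 'large', None, 'add as largefile')])

    print(table['add']({'large': True}))   # prints the notice, then ['large']
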
diff --git a/hgext/largefiles/wirestore.py b/hgext/largefiles/wirestore.py
deleted file mode 100644
index a394cf0..0000000
--- a/hgext/largefiles/wirestore.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2010-2011 Fog Creek Software
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''largefile store working over Mercurial's wire protocol'''
-
-import lfutil
-import remotestore
-
-class wirestore(remotestore.remotestore):
- def __init__(self, ui, repo, remote):
- cap = remote.capable('largefiles')
- if not cap:
- raise lfutil.storeprotonotcapable([])
- storetypes = cap.split(',')
- if 'serve' not in storetypes:
- raise lfutil.storeprotonotcapable(storetypes)
- self.remote = remote
- super(wirestore, self).__init__(ui, repo, remote.url())
-
- def _put(self, hash, fd):
- return self.remote.putlfile(hash, fd)
-
- def _get(self, hash):
- return self.remote.getlfile(hash)
-
- def _stat(self, hashes):
- batch = self.remote.batch()
- futures = {}
- for hash in hashes:
- futures[hash] = batch.statlfile(hash)
- batch.submit()
- retval = {}
- for hash in hashes:
- retval[hash] = not futures[hash].value
- return retval
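
_stat above funnels every statlfile call through a single batch: each call
returns a future whose value is filled in when the batch is submitted, so n
hashes cost one round trip instead of n. A standalone sketch of that
future/batch shape (illustrative classes, not wireproto's remotebatch)::

    class future(object):
        value = None

    class batch(object):
        def __init__(self, backend):
            self.backend = backend
            self.calls = []
        def statlfile(self, sha):
            f = future()
            self.calls.append((sha, f))
            return f
        def submit(self):
            # one "round trip" resolves every queued future
            for sha, f in self.calls:
                f.value = self.backend.get(sha, 2)   # 2 = missing

    backend = {'abc': 0, 'def': 1}
    b = batch(backend)
    futures = dict((sha, b.statlfile(sha)) for sha in ['abc', 'def', '123'])
    b.submit()
    print(dict((sha, not f.value) for sha, f in futures.items()))
    # {'abc': True, 'def': False, '123': False} -- True means "exists"
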
diff --git a/hgext/mq.py b/hgext/mq.py
index 33a31c4..a1b4e81 100644
--- a/hgext/mq.py
+++ b/hgext/mq.py
@@ -38,32 +38,15 @@ preserving existing git patches upon qrefresh. If set to 'yes' or
'no', mq will override the [diff] section and always generate git or
regular patches, possibly losing data in the second case.
-It may be desirable for mq changesets to be kept in the secret phase (see
-:hg:`help phases`), which can be enabled with the following setting::
-
- [mq]
- secret = True
-
You will by default be managing a patch queue named "patches". You can
create other, independent patch queues with the :hg:`qqueue` command.
-
-If the working directory contains uncommitted files, qpush, qpop and
-qgoto abort immediately. If -f/--force is used, the changes are
-discarded. Setting::
-
- [mq]
- keepchanges = True
-
-makes them behave as if --keep-changes were passed, and non-conflicting
-local changes will be tolerated and preserved. If incompatible options
-such as -f/--force or --exact are passed, this setting is ignored.
'''
from mercurial.i18n import _
from mercurial.node import bin, hex, short, nullid, nullrev
from mercurial.lock import release
from mercurial import commands, cmdutil, hg, scmutil, util, revset
-from mercurial import repair, extensions, url, error, phases
+from mercurial import repair, extensions, url, error
from mercurial import patch as patchmod
import os, re, errno, shutil
@@ -73,7 +56,6 @@ seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
cmdtable = {}
command = cmdutil.command(cmdtable)
-testedwith = 'internal'
# Patch names look like unix-file names.
# They must be joinable with queue directory and result in the patch path.
@@ -269,32 +251,6 @@ class patchheader(object):
ci += 1
del self.comments[ci]
-def newcommit(repo, phase, *args, **kwargs):
- """helper dedicated to ensure a commit respect mq.secret setting
-
- It should be used instead of repo.commit inside the mq source for operation
- creating new changeset.
- """
- if phase is None:
- if repo.ui.configbool('mq', 'secret', False):
- phase = phases.secret
- if phase is not None:
- backup = repo.ui.backupconfig('phases', 'new-commit')
- # Marking the repository as committing an mq patch can be used
- # to optimize operations like _branchtags().
- repo._committingpatch = True
- try:
- if phase is not None:
- repo.ui.setconfig('phases', 'new-commit', phase)
- return repo.commit(*args, **kwargs)
- finally:
- repo._committingpatch = False
- if phase is not None:
- repo.ui.restoreconfig(backup)
-
-class AbortNoCleanup(error.Abort):
- pass
-
class queue(object):
def __init__(self, ui, path, patchdir=None):
self.basepath = path
@@ -311,8 +267,8 @@ class queue(object):
self.path = patchdir or curpath
self.opener = scmutil.opener(self.path)
self.ui = ui
- self.applieddirty = False
- self.seriesdirty = False
+ self.applieddirty = 0
+ self.seriesdirty = 0
self.added = []
self.seriespath = "series"
self.statuspath = "status"
@@ -323,7 +279,7 @@ class queue(object):
try:
gitmode = ui.configbool('mq', 'git', None)
if gitmode is None:
- raise error.ConfigError
+ raise error.ConfigError()
self.gitmode = gitmode and 'yes' or 'no'
except error.ConfigError:
self.gitmode = ui.config('mq', 'git', 'auto').lower()
@@ -331,31 +287,25 @@ class queue(object):
@util.propertycache
def applied(self):
- def parselines(lines):
- for l in lines:
- entry = l.split(':', 1)
- if len(entry) > 1:
- n, name = entry
- yield statusentry(bin(n), name)
- elif l.strip():
- self.ui.warn(_('malformed mq status line: %s\n') % entry)
- # else we ignore empty lines
- try:
+ if os.path.exists(self.join(self.statuspath)):
+ def parselines(lines):
+ for l in lines:
+ entry = l.split(':', 1)
+ if len(entry) > 1:
+ n, name = entry
+ yield statusentry(bin(n), name)
+ elif l.strip():
+ self.ui.warn(_('malformed mq status line: %s\n') % entry)
+ # else we ignore empty lines
lines = self.opener.read(self.statuspath).splitlines()
return list(parselines(lines))
- except IOError, e:
- if e.errno == errno.ENOENT:
- return []
- raise
+ return []
@util.propertycache
def fullseries(self):
- try:
+ if os.path.exists(self.join(self.seriespath)):
return self.opener.read(self.seriespath).splitlines()
- except IOError, e:
- if e.errno == errno.ENOENT:
- return []
- raise
+ return []
@util.propertycache
def series(self):
@@ -371,8 +321,8 @@ class queue(object):
for a in 'applied fullseries series seriesguards'.split():
if a in self.__dict__:
delattr(self, a)
- self.applieddirty = False
- self.seriesdirty = False
+ self.applieddirty = 0
+ self.seriesdirty = 0
self.guardsdirty = False
self.activeguards = None
@@ -547,13 +497,10 @@ class queue(object):
fp.close()
if self.applieddirty:
writelist(map(str, self.applied), self.statuspath)
- self.applieddirty = False
if self.seriesdirty:
writelist(self.fullseries, self.seriespath)
- self.seriesdirty = False
if self.guardsdirty:
writelist(self.activeguards, self.guardspath)
- self.guardsdirty = False
if self.added:
qrepo = self.qrepo()
if qrepo:
@@ -569,18 +516,6 @@ class queue(object):
except OSError, inst:
self.ui.warn(_('error removing undo: %s\n') % str(inst))
- def backup(self, repo, files, copy=False):
- # backup local changes in --force case
- for f in sorted(files):
- absf = repo.wjoin(f)
- if os.path.lexists(absf):
- self.ui.note(_('saving current version of %s as %s\n') %
- (f, f + '.orig'))
- if copy:
- util.copyfile(absf, absf + '.orig')
- else:
- util.rename(absf, absf + '.orig')
-
def printdiff(self, repo, diffopts, node1, node2=None, files=None,
fp=None, changes=None, opts={}):
stat = opts.get('stat')
@@ -609,12 +544,12 @@ class queue(object):
ret = hg.merge(repo, rev)
if ret:
raise util.Abort(_("update returned %d") % ret)
- n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
+ n = repo.commit(ctx.description(), ctx.user(), force=True)
if n is None:
raise util.Abort(_("repo commit failed"))
try:
ph = patchheader(mergeq.join(patch), self.plainmode)
- except Exception:
+ except:
raise util.Abort(_("unable to read %s") % patch)
diffopts = self.patchopts(diffopts, patch)
@@ -649,10 +584,10 @@ class queue(object):
# the first patch in the queue is never a merge patch
#
pname = ".hg.patches.merge.marker"
- n = newcommit(repo, None, '[mq]: merge marker', force=True)
+ n = repo.commit('[mq]: merge marker', force=True)
self.removeundo(repo)
self.applied.append(statusentry(n, pname))
- self.applieddirty = True
+ self.applieddirty = 1
head = self.qparents(repo)
@@ -673,7 +608,7 @@ class queue(object):
err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
if head:
self.applied.append(statusentry(head, patch))
- self.applieddirty = True
+ self.applieddirty = 1
if err:
return (err, head)
self.savedirty()
@@ -691,12 +626,10 @@ class queue(object):
self.ui.note(str(inst) + '\n')
if not self.ui.verbose:
self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
- self.ui.traceback()
return (False, list(files), False)
def apply(self, repo, series, list=False, update_status=True,
- strict=False, patchdir=None, merge=None, all_files=None,
- tobackup=None, keepchanges=False):
+ strict=False, patchdir=None, merge=None, all_files=None):
wlock = lock = tr = None
try:
wlock = repo.wlock()
@@ -704,36 +637,25 @@ class queue(object):
tr = repo.transaction("qpush")
try:
ret = self._apply(repo, series, list, update_status,
- strict, patchdir, merge, all_files=all_files,
- tobackup=tobackup, keepchanges=keepchanges)
+ strict, patchdir, merge, all_files=all_files)
tr.close()
self.savedirty()
return ret
- except AbortNoCleanup:
- tr.close()
- self.savedirty()
- return 2, repo.dirstate.p1()
- except: # re-raises
+ except:
try:
tr.abort()
finally:
repo.invalidate()
repo.dirstate.invalidate()
- self.invalidate()
raise
finally:
release(tr, lock, wlock)
self.removeundo(repo)
def _apply(self, repo, series, list=False, update_status=True,
- strict=False, patchdir=None, merge=None, all_files=None,
- tobackup=None, keepchanges=False):
- """returns (error, hash)
-
- error = 1 for unable to read, 2 for patch failed, 3 for patch
- fuzz. tobackup is None or a set of files to backup before they
- are modified by a patch.
- """
+ strict=False, patchdir=None, merge=None, all_files=None):
+ '''returns (error, hash)
+ error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
# TODO unify with commands.py
if not patchdir:
patchdir = self.path
@@ -765,14 +687,6 @@ class queue(object):
message = '\n'.join(message)
if ph.haspatch:
- if tobackup:
- touched = patchmod.changedfiles(self.ui, repo, pf)
- touched = set(touched) & tobackup
- if touched and keepchanges:
- raise AbortNoCleanup(
- _("local changes found, refresh first"))
- self.backup(repo, touched, copy=True)
- tobackup = tobackup - touched
(patcherr, files, fuzz) = self.patch(repo, pf)
if all_files is not None:
all_files.update(files)
@@ -795,14 +709,11 @@ class queue(object):
for f in merged:
repo.dirstate.merge(f)
p1, p2 = repo.dirstate.parents()
- repo.setparents(p1, merge)
+ repo.dirstate.setparents(p1, merge)
match = scmutil.matchfiles(repo, files or [])
- oldtip = repo['tip']
- n = newcommit(repo, None, message, ph.user, ph.date, match=match,
- force=True)
- if repo['tip'] == oldtip:
- raise util.Abort(_("qpush exactly duplicates child changeset"))
+ n = repo.commit(message, ph.user, ph.date, match=match, force=True)
+
if n is None:
raise util.Abort(_("repository commit failed"))
@@ -828,11 +739,10 @@ class queue(object):
for p in patches:
os.unlink(self.join(p))
- qfinished = []
if numrevs:
qfinished = self.applied[:numrevs]
del self.applied[:numrevs]
- self.applieddirty = True
+ self.applieddirty = 1
unknown = []
@@ -854,8 +764,7 @@ class queue(object):
raise util.Abort(''.join(msg % p for p in unknown))
self.parseseries()
- self.seriesdirty = True
- return [entry.node for entry in qfinished]
+ self.seriesdirty = 1
def _revpatches(self, repo, revs):
firstrev = repo[self.applied[0].node].rev()
@@ -882,17 +791,8 @@ class queue(object):
return patches
def finish(self, repo, revs):
- # Manually trigger phase computation to ensure phasedefaults is
- # executed before we remove the patches.
- repo._phasecache
patches = self._revpatches(repo, sorted(revs))
- qfinished = self._cleanup(patches, len(patches))
- if qfinished and repo.ui.configbool('mq', 'secret', False):
- # only use this logic when the secret option is added
- oldqbase = repo[qfinished[0]]
- tphase = repo.ui.config('phases', 'new-commit', phases.draft)
- if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
- phases.advanceboundary(repo, tphase, qfinished)
+ self._cleanup(patches, len(patches))
def delete(self, repo, patches, opts):
if not patches and not opts.get('rev'):
@@ -933,35 +833,19 @@ class queue(object):
return top, patch
return None, None
- def checksubstate(self, repo, baserev=None):
+ def checksubstate(self, repo):
'''return list of subrepos at a different revision than substate.
Abort if any subrepos have uncommitted changes.'''
inclsubs = []
wctx = repo[None]
- if baserev:
- bctx = repo[baserev]
- else:
- bctx = wctx.parents()[0]
for s in wctx.substate:
if wctx.sub(s).dirty(True):
raise util.Abort(
_("uncommitted changes in subrepository %s") % s)
- elif s not in bctx.substate or bctx.sub(s).dirty():
+ elif wctx.sub(s).dirty():
inclsubs.append(s)
return inclsubs
- def putsubstate2changes(self, substatestate, changes):
- for files in changes[:3]:
- if '.hgsubstate' in files:
- return # already listed up
- # not yet listed up
- if substatestate in 'a?':
- changes[1].append('.hgsubstate')
- elif substatestate in 'r':
- changes[2].append('.hgsubstate')
- else: # modified
- changes[0].append('.hgsubstate')
-
def localchangesfound(self, refresh=True):
if refresh:
raise util.Abort(_("local changes found, refresh first"))
@@ -997,10 +881,6 @@ class queue(object):
else:
raise util.Abort(_('patch "%s" already exists') % name)
- def checkkeepchanges(self, keepchanges, force):
- if force and keepchanges:
- raise util.Abort(_('cannot use both --force and --keep-changes'))
-
def new(self, repo, patchfn, *pats, **opts):
"""options:
msg: a string or a no-argument function returning a string
@@ -1016,7 +896,6 @@ class queue(object):
inclsubs = self.checksubstate(repo)
if inclsubs:
inclsubs.append('.hgsubstate')
- substatestate = repo.dirstate['.hgsubstate']
if opts.get('include') or opts.get('exclude') or pats:
if inclsubs:
pats = list(pats or []) + inclsubs
@@ -1026,12 +905,10 @@ class queue(object):
if f != '.hgsubstate': # .hgsubstate is auto-created
raise util.Abort('%s: %s' % (f, msg))
match.bad = badfn
- changes = repo.status(match=match)
- m, a, r, d = changes[:4]
+ m, a, r, d = repo.status(match=match)[:4]
else:
- changes = self.checklocalchanges(repo, force=True)
- m, a, r, d = changes
- match = scmutil.matchfiles(repo, m + a + r + inclsubs)
+ m, a, r, d = self.checklocalchanges(repo, force=True)
+ match = scmutil.matchfiles(repo, m + a + r + inclsubs)
if len(repo[None].parents()) > 1:
raise util.Abort(_('cannot manage merge changesets'))
commitfiles = m + a + r
@@ -1061,42 +938,41 @@ class queue(object):
p.write("# User " + user + "\n")
if date:
p.write("# Date %s %s\n\n" % date)
- if util.safehasattr(msg, '__call__'):
+ if hasattr(msg, '__call__'):
msg = msg()
commitmsg = msg and msg or ("[mq]: %s" % patchfn)
- n = newcommit(repo, None, commitmsg, user, date, match=match,
- force=True)
+ n = repo.commit(commitmsg, user, date, match=match, force=True)
if n is None:
raise util.Abort(_("repo commit failed"))
try:
self.fullseries[insert:insert] = [patchfn]
self.applied.append(statusentry(n, patchfn))
self.parseseries()
- self.seriesdirty = True
- self.applieddirty = True
+ self.seriesdirty = 1
+ self.applieddirty = 1
if msg:
msg = msg + "\n\n"
p.write(msg)
if commitfiles:
parent = self.qparents(repo, n)
- if inclsubs:
- self.putsubstate2changes(substatestate, changes)
chunks = patchmod.diff(repo, node1=parent, node2=n,
- changes=changes, opts=diffopts)
+ match=match, opts=diffopts)
for chunk in chunks:
p.write(chunk)
p.close()
+ wlock.release()
+ wlock = None
r = self.qrepo()
if r:
r[None].add([patchfn])
- except: # re-raises
+ except:
repo.rollback()
raise
except Exception:
patchpath = self.join(patchfn)
try:
os.unlink(patchpath)
- except OSError:
+ except:
self.ui.warn(_('error unlinking %s\n') % patchpath)
raise
self.removeundo(repo)
@@ -1115,7 +991,12 @@ class queue(object):
hg.clean(repo, urev)
repo.dirstate.write()
- repair.strip(self.ui, repo, revs, backup)
+ self.removeundo(repo)
+ for rev in revs:
+ repair.strip(self.ui, repo, rev, backup)
+ # strip may have unbundled a set of backed up revisions after
+ # the actual strip
+ self.removeundo(repo)
finally:
release(lock, wlock)
@@ -1129,10 +1010,12 @@ class queue(object):
# if the exact patch name does not exist, we try a few
# variations. If strict is passed, we try only #1
#
- # 1) a number (as string) to indicate an offset in the series file
+ # 1) a number to indicate an offset in the series file
# 2) a unique substring of the patch name was given
# 3) patchname[-+]num to indicate an offset in the series file
def lookup(self, patch, strict=False):
+ patch = patch and str(patch)
+
def partialname(s):
if s in self.series:
return s
@@ -1151,6 +1034,8 @@ class queue(object):
return self.series[0]
return None
+ if patch is None:
+ return None
if patch in self.series:
return patch
@@ -1193,10 +1078,8 @@ class queue(object):
return self.series[i + off]
raise util.Abort(_("patch %s not in series") % patch)
- def push(self, repo, patch=None, force=False, list=False, mergeq=None,
- all=False, move=False, exact=False, nobackup=False,
- keepchanges=False):
- self.checkkeepchanges(keepchanges, force)
+ def push(self, repo, patch=None, force=False, list=False,
+ mergeq=None, all=False, move=False, exact=False):
diffopts = self.diffopts()
wlock = repo.wlock()
try:
@@ -1212,12 +1095,12 @@ class queue(object):
self.ui.warn(_('no patches in series\n'))
return 0
+ patch = self.lookup(patch)
# Suppose our series file is: A B C and the current 'top'
# patch is B. qpush C should be performed (moving forward)
# qpush B is a NOP (no change) qpush A is an error (can't
# go backwards with qpush)
if patch:
- patch = self.lookup(patch)
info = self.isapplied(patch)
if info and info[0] >= len(self.applied) - 1:
self.ui.warn(
@@ -1251,47 +1134,37 @@ class queue(object):
if start == len(self.series):
self.ui.warn(_('patch series already fully applied\n'))
return 1
- if not force and not keepchanges:
+ if not force:
self.checklocalchanges(repo, refresh=self.applied)
if exact:
- if keepchanges:
- raise util.Abort(
- _("cannot use --exact and --keep-changes together"))
if move:
- raise util.Abort(_('cannot use --exact and --move '
- 'together'))
+ raise util.Abort(_("cannot use --exact and --move together"))
if self.applied:
- raise util.Abort(_('cannot push --exact with applied '
- 'patches'))
+ raise util.Abort(_("cannot push --exact with applied patches"))
root = self.series[start]
target = patchheader(self.join(root), self.plainmode).parent
if not target:
- raise util.Abort(
- _("%s does not have a parent recorded") % root)
+ raise util.Abort(_("%s does not have a parent recorded" % root))
if not repo[target] == repo['.']:
hg.update(repo, target)
if move:
if not patch:
raise util.Abort(_("please specify the patch to move"))
- for fullstart, rpn in enumerate(self.fullseries):
- # strip markers for patch guards
- if self.guard_re.split(rpn, 1)[0] == self.series[start]:
- break
- for i, rpn in enumerate(self.fullseries[fullstart:]):
+ for i, rpn in enumerate(self.fullseries[start:]):
# strip markers for patch guards
if self.guard_re.split(rpn, 1)[0] == patch:
break
- index = fullstart + i
+ index = start + i
assert index < len(self.fullseries)
fullpatch = self.fullseries[index]
del self.fullseries[index]
- self.fullseries.insert(fullstart, fullpatch)
+ self.fullseries.insert(start, fullpatch)
self.parseseries()
- self.seriesdirty = True
+ self.seriesdirty = 1
- self.applieddirty = True
+ self.applieddirty = 1
if start > 0:
self.checktoppatch(repo)
if not patch:
@@ -1300,23 +1173,14 @@ class queue(object):
else:
end = self.series.index(patch, start) + 1
- tobackup = set()
- if (not nobackup and force) or keepchanges:
- m, a, r, d = self.checklocalchanges(repo, force=True)
- if keepchanges:
- tobackup.update(m + a + r + d)
- else:
- tobackup.update(m + a)
-
s = self.series[start:end]
all_files = set()
try:
if mergeq:
ret = self.mergepatch(repo, mergeq, s, diffopts)
else:
- ret = self.apply(repo, s, list, all_files=all_files,
- tobackup=tobackup, keepchanges=keepchanges)
- except: # re-raises
+ ret = self.apply(repo, s, list, all_files=all_files)
+ except:
self.ui.warn(_('cleaning up working directory...'))
node = repo.dirstate.p1()
hg.revert(repo, node, None)
@@ -1345,9 +1209,7 @@ class queue(object):
finally:
wlock.release()
- def pop(self, repo, patch=None, force=False, update=True, all=False,
- nobackup=False, keepchanges=False):
- self.checkkeepchanges(keepchanges, force)
+ def pop(self, repo, patch=None, force=False, update=True, all=False):
wlock = repo.wlock()
try:
if patch:
@@ -1392,17 +1254,10 @@ class queue(object):
break
update = needupdate
- tobackup = set()
- if update:
- m, a, r, d = self.checklocalchanges(
- repo, force=force or keepchanges)
- if force:
- if not nobackup:
- tobackup.update(m + a)
- elif keepchanges:
- tobackup.update(m + a + r + d)
-
- self.applieddirty = True
+ if not force and update:
+ self.checklocalchanges(repo)
+
+ self.applieddirty = 1
end = len(self.applied)
rev = self.applied[start].node
if update:
@@ -1417,10 +1272,6 @@ class queue(object):
if heads != [self.applied[-1].node]:
raise util.Abort(_("popping would remove a revision not "
"managed by this patch queue"))
- if not repo[self.applied[-1].node].mutable():
- raise util.Abort(
- _("popping would remove an immutable revision"),
- hint=_('see "hg help phases" for details'))
# we know there are no local changes, so we can make a simplified
# form of hg.update.
@@ -1430,12 +1281,6 @@ class queue(object):
m, a, r, d = repo.status(qp, top)[:4]
if d:
raise util.Abort(_("deletions found between repo revs"))
-
- tobackup = set(a + m + r) & tobackup
- if keepchanges and tobackup:
- self.localchangesfound()
- self.backup(repo, tobackup)
-
for f in a:
try:
util.unlinkpath(repo.wjoin(f))
@@ -1447,7 +1292,7 @@ class queue(object):
fctx = ctx[f]
repo.wwrite(f, fctx.data(), fctx.flags())
repo.dirstate.normal(f)
- repo.setparents(qp, nullid)
+ repo.dirstate.setparents(qp, nullid)
for patch in reversed(self.applied[start:end]):
self.ui.status(_("popping %s\n") % patch.name)
del self.applied[start:end]
@@ -1488,18 +1333,11 @@ class queue(object):
(top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
if repo.changelog.heads(top) != [top]:
raise util.Abort(_("cannot refresh a revision with children"))
- if not repo[top].mutable():
- raise util.Abort(_("cannot refresh immutable revision"),
- hint=_('see "hg help phases" for details'))
+
+ inclsubs = self.checksubstate(repo)
cparents = repo.changelog.parents(top)
patchparent = self.qparents(repo, top)
-
- inclsubs = self.checksubstate(repo, hex(patchparent))
- if inclsubs:
- inclsubs.append('.hgsubstate')
- substatestate = repo.dirstate['.hgsubstate']
-
ph = patchheader(self.join(patchfn), self.plainmode)
diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
if msg:
@@ -1577,6 +1415,10 @@ class queue(object):
a = list(aa)
c = [filter(matchfn, l) for l in (m, a, r)]
match = scmutil.matchfiles(repo, set(c[0] + c[1] + c[2] + inclsubs))
+ chunks = patchmod.diff(repo, patchparent, match=match,
+ changes=c, opts=diffopts)
+ for chunk in chunks:
+ patchf.write(chunk)
try:
if diffopts.git or diffopts.upgrade:
@@ -1635,35 +1477,24 @@ class queue(object):
user = ph.user or changes[1]
- oldphase = repo[top].phase()
-
# assumes strip can roll itself back if interrupted
- repo.setparents(*cparents)
+ repo.dirstate.setparents(*cparents)
self.applied.pop()
- self.applieddirty = True
+ self.applieddirty = 1
self.strip(repo, [top], update=False,
backup='strip')
- except: # re-raises
+ except:
repo.dirstate.invalidate()
raise
try:
# might be nice to attempt to roll back strip after this
-
- # Ensure we create a new changeset in the same phase than
- # the old one.
- n = newcommit(repo, oldphase, message, user, ph.date,
- match=match, force=True)
+ n = repo.commit(message, user, ph.date, match=match,
+ force=True)
# only write patch after a successful commit
- if inclsubs:
- self.putsubstate2changes(substatestate, c)
- chunks = patchmod.diff(repo, patchparent,
- changes=c, opts=diffopts)
- for chunk in chunks:
- patchf.write(chunk)
- patchf.close()
+ patchf.rename()
self.applied.append(statusentry(n, patchfn))
- except: # re-raises
+ except:
ctx = repo[cparents[0]]
repo.dirstate.rebuild(ctx.node(), ctx.manifest())
self.savedirty()
@@ -1790,14 +1621,14 @@ class queue(object):
else:
series.append(l)
if datastart is None:
- self.ui.warn(_("no saved patch data found\n"))
+ self.ui.warn(_("No saved patch data found\n"))
return 1
self.ui.warn(_("restoring status: %s\n") % lines[0])
self.fullseries = series
self.applied = applied
self.parseseries()
- self.seriesdirty = True
- self.applieddirty = True
+ self.seriesdirty = 1
+ self.applieddirty = 1
heads = repo.changelog.heads()
if delete:
if rev not in heads:
@@ -1817,7 +1648,7 @@ class queue(object):
self.ui.status(_("updating queue directory\n"))
r = self.qrepo()
if not r:
- self.ui.warn(_("unable to load queue repository\n"))
+ self.ui.warn(_("Unable to load queue repository\n"))
return 1
hg.clean(r, qpp[0])
@@ -1845,7 +1676,7 @@ class queue(object):
self.ui.warn(_("repo commit failed\n"))
return 1
self.applied.append(statusentry(n, '.hg.patches.save.line'))
- self.applieddirty = True
+ self.applieddirty = 1
self.removeundo(repo)
def fullseriesend(self):
@@ -1869,9 +1700,9 @@ class queue(object):
for i in xrange(start, len(self.series)):
p, reason = self.pushable(i)
if p:
- return i
+ break
self.explainpushable(i)
- return len(self.series)
+ return i
if self.applied:
p = self.applied[-1].name
try:
@@ -1902,12 +1733,9 @@ class queue(object):
'files'))
rev = scmutil.revrange(repo, rev)
rev.sort(reverse=True)
- elif not files:
- raise util.Abort(_('no files or revisions specified'))
if (len(files) > 1 or len(rev) > 1) and patchname:
raise util.Abort(_('option "-n" not valid when importing multiple '
'patches'))
- imported = []
if rev:
# If mq patches are applied, we can only import revisions
# that form a linear path to qbase.
@@ -1934,9 +1762,6 @@ class queue(object):
diffopts = self.diffopts({'git': git})
for r in rev:
- if not repo[r].mutable():
- raise util.Abort(_('revision %d is not mutable') % r,
- hint=_('see "hg help phases" for details'))
p1, p2 = repo.changelog.parentrevs(r)
n = repo.changelog.node(r)
if p2 != nullrev:
@@ -1960,13 +1785,9 @@ class queue(object):
self.applied.insert(0, se)
self.added.append(patchname)
- imported.append(patchname)
patchname = None
- if rev and repo.ui.configbool('mq', 'secret', False):
- # if we added anything with --rev, we must move the secret root
- phases.retractboundary(repo, phases.secret, [n])
self.parseseries()
- self.applieddirty = True
+ self.applieddirty = 1
self.seriesdirty = True
for i, filename in enumerate(files):
@@ -2015,19 +1836,9 @@ class queue(object):
self.seriesdirty = True
self.ui.warn(_("adding %s to series file\n") % patchname)
self.added.append(patchname)
- imported.append(patchname)
patchname = None
self.removeundo(repo)
- return imported
-
-def fixkeepchangesopts(ui, opts):
- if (not ui.configbool('mq', 'keepchanges') or opts.get('force')
- or opts.get('exact')):
- return opts
- opts = dict(opts)
- opts['keep_changes'] = True
- return opts
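Note: the deleted fixkeepchangesopts helper implements a config fallback;
unless --force or --exact is given, the mq.keepchanges setting turns on
--keep-changes by default for qpush, qpop and qgoto. In hgrc form::

    [mq]
    # behave as if --keep-changes were passed, except with --force/--exact
    keepchanges = True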
@command("qdelete|qremove|qrm",
[('k', 'keep', None, _('keep patch file')),
@@ -2037,9 +1848,8 @@ def fixkeepchangesopts(ui, opts):
def delete(ui, repo, *patches, **opts):
"""remove patches from queue
- The patches must not be applied, and at least one patch is required. Exact
- patch identifiers must be given. With -k/--keep, the patch files are
- preserved in the patch directory.
+ The patches must not be applied, and at least one patch is required. With
+ -k/--keep, the patch files are preserved in the patch directory.
To stop managing a patch and move it into permanent history,
use the :hg:`qfinish` command."""
@@ -2049,7 +1859,7 @@ def delete(ui, repo, *patches, **opts):
return 0
@command("qapplied",
- [('1', 'last', None, _('show only the preceding applied patch'))
+ [('1', 'last', None, _('show only the last patch'))
] + seriesopts,
_('hg qapplied [-1] [-s] [PATCH]'))
def applied(ui, repo, patch=None, **opts):
@@ -2115,9 +1925,9 @@ def unapplied(ui, repo, patch=None, **opts):
_('place existing revisions under mq control'), _('REV')),
('g', 'git', None, _('use git extended diff format')),
('P', 'push', None, _('qpush after importing'))],
- _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'))
+ _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...'))
def qimport(ui, repo, *filename, **opts):
- """import a patch or existing changeset
+ """import a patch
The patch is inserted into the series after the last applied
patch. If no patches have been applied, qimport prepends the patch
@@ -2149,21 +1959,16 @@ def qimport(ui, repo, *filename, **opts):
Returns 0 if import succeeded.
"""
- lock = repo.lock() # cause this may move phase
+ q = repo.mq
try:
- q = repo.mq
- try:
- imported = q.qimport(
- repo, filename, patchname=opts.get('name'),
- existing=opts.get('existing'), force=opts.get('force'),
- rev=opts.get('rev'), git=opts.get('git'))
- finally:
- q.savedirty()
+ q.qimport(repo, filename, patchname=opts.get('name'),
+ existing=opts.get('existing'), force=opts.get('force'),
+ rev=opts.get('rev'), git=opts.get('git'))
finally:
- lock.release()
+ q.savedirty()
- if imported and opts.get('push') and not opts.get('rev'):
- return q.push(repo, imported[-1])
+ if opts.get('push') and not opts.get('rev'):
+ return q.push(repo, None)
return 0
def qinit(ui, repo, create):
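Note: the qimport rewrite above widens locking because importing with --rev
may retract the secret phase boundary, and it returns the names actually
imported so --push can push the last one. A rough sketch of that calling
discipline, assuming a local repo object (importandpush is a hypothetical
wrapper)::

    def importandpush(repo, filenames, revs):
        lock = repo.lock()              # phase moves need the repo lock
        try:
            q = repo.mq
            try:
                imported = q.qimport(repo, filenames, rev=revs, git=True)
            finally:
                q.savedirty()           # persist series/status regardless
        finally:
            lock.release()
        if imported:
            return q.push(repo, imported[-1])   # --push pushes the last one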
@@ -2210,8 +2015,7 @@ def init(ui, repo, **opts):
@command("qclone",
[('', 'pull', None, _('use pull protocol to copy metadata')),
- ('U', 'noupdate', None,
- _('do not update the new working directories')),
+ ('U', 'noupdate', None, _('do not update the new working directories')),
('', 'uncompressed', None,
_('use uncompressed transfer (fast over LAN)')),
('p', 'patches', '',
@@ -2236,63 +2040,53 @@ def clone(ui, source, dest=None, **opts):
Return 0 on success.
'''
def patchdir(repo):
- """compute a patch repo url from a repo object"""
url = repo.url()
if url.endswith('/'):
url = url[:-1]
return url + '/.hg/patches'
-
- # main repo (destination and sources)
if dest is None:
dest = hg.defaultdest(source)
- sr = hg.peer(ui, opts, ui.expandpath(source))
-
- # patches repo (source only)
+ sr = hg.repository(hg.remoteui(ui, opts), ui.expandpath(source))
if opts.get('patches'):
patchespath = ui.expandpath(opts.get('patches'))
else:
patchespath = patchdir(sr)
try:
- hg.peer(ui, opts, patchespath)
+ hg.repository(ui, patchespath)
except error.RepoError:
raise util.Abort(_('versioned patch repository not found'
' (see init --mq)'))
qbase, destrev = None, None
if sr.local():
- repo = sr.local()
- if repo.mq.applied and repo[qbase].phase() != phases.secret:
- qbase = repo.mq.applied[0].node
+ if sr.mq.applied:
+ qbase = sr.mq.applied[0].node
if not hg.islocal(dest):
- heads = set(repo.heads())
- destrev = list(heads.difference(repo.heads(qbase)))
- destrev.append(repo.changelog.parents(qbase)[0])
+ heads = set(sr.heads())
+ destrev = list(heads.difference(sr.heads(qbase)))
+ destrev.append(sr.changelog.parents(qbase)[0])
elif sr.capable('lookup'):
try:
qbase = sr.lookup('qbase')
except error.RepoError:
pass
-
ui.note(_('cloning main repository\n'))
sr, dr = hg.clone(ui, opts, sr.url(), dest,
pull=opts.get('pull'),
rev=destrev,
update=False,
stream=opts.get('uncompressed'))
-
ui.note(_('cloning patch repository\n'))
hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr),
pull=opts.get('pull'), update=not opts.get('noupdate'),
stream=opts.get('uncompressed'))
-
if dr.local():
- repo = dr.local()
if qbase:
ui.note(_('stripping applied patches from destination '
'repository\n'))
- repo.mq.strip(repo, [qbase], update=False, backup=None)
+ dr.mq.strip(dr, [qbase], update=False, backup=None)
if not opts.get('noupdate'):
ui.note(_('updating destination repository\n'))
- hg.update(repo, repo.changelog.tip())
+ hg.update(dr, dr.changelog.tip())
@command("qcommit|qci",
commands.table["^commit|ci"][1],
@@ -2315,8 +2109,7 @@ def series(ui, repo, **opts):
"""print the entire series file
Returns 0 on success."""
- repo.mq.qseries(repo, missing=opts.get('missing'),
- summary=opts.get('summary'))
+ repo.mq.qseries(repo, missing=opts.get('missing'), summary=opts.get('summary'))
return 0
@command("qtop", seriesopts, _('hg qtop [-s]'))
@@ -2335,7 +2128,7 @@ def top(ui, repo, **opts):
@command("qnext", seriesopts, _('hg qnext [-s]'))
def next(ui, repo, **opts):
- """print the name of the next pushable patch
+ """print the name of the next patch
Returns 0 on success."""
q = repo.mq
@@ -2347,7 +2140,7 @@ def next(ui, repo, **opts):
@command("qprev", seriesopts, _('hg qprev [-s]'))
def prev(ui, repo, **opts):
- """print the name of the preceding applied patch
+ """print the name of the previous patch
Returns 0 on success."""
q = repo.mq
@@ -2358,8 +2151,7 @@ def prev(ui, repo, **opts):
if not l:
ui.write(_("no patches applied\n"))
return 1
- idx = q.series.index(q.applied[-2].name)
- q.qseries(repo, start=idx, length=1, status='A',
+ q.qseries(repo, start=l - 2, length=1, status='A',
summary=opts.get('summary'))
def setupheaderopts(ui, opts):
@@ -2517,7 +2309,9 @@ def fold(ui, repo, *files, **opts):
current patch header, separated by a line of ``* * *``.
Returns 0 on success."""
+
q = repo.mq
+
if not files:
raise util.Abort(_('qfold requires at least one patch name'))
if not q.checktoppatch(repo)[0]:
@@ -2535,10 +2329,9 @@ def fold(ui, repo, *files, **opts):
for f in files:
p = q.lookup(f)
if p in patches or p == parent:
- ui.warn(_('skipping already folded patch %s\n') % p)
+ ui.warn(_('Skipping already folded patch %s\n') % p)
if q.isapplied(p):
- raise util.Abort(_('qfold cannot fold already applied patch %s')
- % p)
+ raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
patches.append(p)
for p in patches:
@@ -2572,26 +2365,18 @@ def fold(ui, repo, *files, **opts):
wlock.release()
@command("qgoto",
- [('', 'keep-changes', None,
- _('tolerate non-conflicting local changes')),
- ('f', 'force', None, _('overwrite any local changes')),
- ('', 'no-backup', None, _('do not save backup copies of files'))],
+ [('f', 'force', None, _('overwrite any local changes'))],
_('hg qgoto [OPTION]... PATCH'))
def goto(ui, repo, patch, **opts):
'''push or pop patches until named patch is at top of stack
Returns 0 on success.'''
- opts = fixkeepchangesopts(ui, opts)
q = repo.mq
patch = q.lookup(patch)
- nobackup = opts.get('no_backup')
- keepchanges = opts.get('keep_changes')
if q.isapplied(patch):
- ret = q.pop(repo, patch, force=opts.get('force'), nobackup=nobackup,
- keepchanges=keepchanges)
+ ret = q.pop(repo, patch, force=opts.get('force'))
else:
- ret = q.push(repo, patch, force=opts.get('force'), nobackup=nobackup,
- keepchanges=keepchanges)
+ ret = q.push(repo, patch, force=opts.get('force'))
q.savedirty()
return ret
@@ -2647,8 +2432,7 @@ def guard(ui, repo, *args, **opts):
args = list(args)
if opts.get('list'):
if args or opts.get('none'):
- raise util.Abort(_('cannot mix -l/--list with options or '
- 'arguments'))
+ raise util.Abort(_('cannot mix -l/--list with options or arguments'))
for i in xrange(len(q.series)):
status(i)
return
@@ -2712,34 +2496,26 @@ def savename(path):
return newpath
@command("^qpush",
- [('', 'keep-changes', None,
- _('tolerate non-conflicting local changes')),
- ('f', 'force', None, _('apply on top of local changes')),
- ('e', 'exact', None,
- _('apply the target patch to its recorded parent')),
+ [('f', 'force', None, _('apply on top of local changes')),
+ ('e', 'exact', None, _('apply the target patch to its recorded parent')),
('l', 'list', None, _('list patch name in commit text')),
('a', 'all', None, _('apply all patches')),
('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
('n', 'name', '',
_('merge queue name (DEPRECATED)'), _('NAME')),
- ('', 'move', None,
- _('reorder patch series and apply only the patch')),
- ('', 'no-backup', None, _('do not save backup copies of files'))],
+ ('', 'move', None, _('reorder patch series and apply only the patch'))],
_('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'))
def push(ui, repo, patch=None, **opts):
"""push the next patch onto the stack
- By default, abort if the working directory contains uncommitted
- changes. With --keep-changes, abort only if the uncommitted files
- overlap with patched files. With -f/--force, backup and patch over
- uncommitted changes.
+ When -f/--force is applied, all local changes in patched files
+ will be lost.
Return 0 on success.
"""
q = repo.mq
mergeq = None
- opts = fixkeepchangesopts(ui, opts)
if opts.get('merge'):
if opts.get('name'):
newpath = repo.join(opts.get('name'))
@@ -2748,48 +2524,37 @@ def push(ui, repo, patch=None, **opts):
if not newpath:
ui.warn(_("no saved queues found, please use -n\n"))
return 1
- mergeq = queue(ui, repo.path, newpath)
+ mergeq = queue(ui, repo.join(""), newpath)
ui.warn(_("merging with queue at: %s\n") % mergeq.path)
ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
- exact=opts.get('exact'), nobackup=opts.get('no_backup'),
- keepchanges=opts.get('keep_changes'))
+ exact=opts.get('exact'))
return ret
@command("^qpop",
[('a', 'all', None, _('pop all patches')),
('n', 'name', '',
_('queue name to pop (DEPRECATED)'), _('NAME')),
- ('', 'keep-changes', None,
- _('tolerate non-conflicting local changes')),
- ('f', 'force', None, _('forget any local changes to patched files')),
- ('', 'no-backup', None, _('do not save backup copies of files'))],
+ ('f', 'force', None, _('forget any local changes to patched files'))],
_('hg qpop [-a] [-f] [PATCH | INDEX]'))
def pop(ui, repo, patch=None, **opts):
"""pop the current patch off the stack
- Without argument, pops off the top of the patch stack. If given a
- patch name, keeps popping off patches until the named patch is at
- the top of the stack.
-
- By default, abort if the working directory contains uncommitted
- changes. With --keep-changes, abort only if the uncommitted files
- overlap with patched files. With -f/--force, backup and discard
- changes made to such files.
+ By default, pops off the top of the patch stack. If given a patch
+ name, keeps popping off patches until the named patch is at the
+ top of the stack.
Return 0 on success.
"""
- opts = fixkeepchangesopts(ui, opts)
localupdate = True
if opts.get('name'):
- q = queue(ui, repo.path, repo.join(opts.get('name')))
+ q = queue(ui, repo.join(""), repo.join(opts.get('name')))
ui.warn(_('using patch queue: %s\n') % q.path)
localupdate = False
else:
q = repo.mq
ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
- all=opts.get('all'), nobackup=opts.get('no_backup'),
- keepchanges=opts.get('keep_changes'))
+ all=opts.get('all'))
q.savedirty()
return ret
@@ -2801,7 +2566,9 @@ def rename(ui, repo, patch, name=None, **opts):
With two arguments, renames PATCH1 to PATCH2.
Returns 0 on success."""
+
q = repo.mq
+
if not name:
name = patch
patch = None
@@ -2824,12 +2591,12 @@ def rename(ui, repo, patch, name=None, **opts):
guards = q.guard_re.findall(q.fullseries[i])
q.fullseries[i] = name + ''.join([' #' + g for g in guards])
q.parseseries()
- q.seriesdirty = True
+ q.seriesdirty = 1
info = q.isapplied(patch)
if info:
q.applied[info[0]] = statusentry(info[1], name)
- q.applieddirty = True
+ q.applieddirty = 1
destdir = os.path.dirname(absdest)
if not os.path.isdir(destdir):
@@ -2844,6 +2611,8 @@ def rename(ui, repo, patch, name=None, **opts):
r.dirstate.drop(patch)
r.dirstate.add(name)
else:
+ if r.dirstate[name] == 'r':
+ wctx.undelete([name])
wctx.copy(patch, name)
wctx.forget([patch])
finally:
@@ -2882,7 +2651,7 @@ def save(ui, repo, **opts):
ret = q.save(repo, msg=message)
if ret:
return ret
- q.savedirty() # save to .hg/patches before copying
+ q.savedirty()
if opts.get('copy'):
path = q.path
if opts.get('name'):
@@ -2899,28 +2668,22 @@ def save(ui, repo, **opts):
ui.warn(_("copy %s to %s\n") % (path, newpath))
util.copyfiles(path, newpath)
if opts.get('empty'):
- del q.applied[:]
- q.applieddirty = True
- q.savedirty()
+ try:
+ os.unlink(q.join(q.statuspath))
+ except:
+ pass
return 0
@command("strip",
- [
- ('r', 'rev', [], _('strip specified revision (optional, '
- 'can specify revisions without this '
- 'option)'), _('REV')),
- ('f', 'force', None, _('force removal of changesets, discard '
+ [('f', 'force', None, _('force removal of changesets, discard '
'uncommitted changes (no backup)')),
('b', 'backup', None, _('bundle only changesets with local revision'
' number greater than REV which are not'
' descendants of REV (DEPRECATED)')),
- ('', 'no-backup', None, _('no backups')),
+ ('n', 'no-backup', None, _('no backups')),
('', 'nobackup', None, _('no backups (DEPRECATED)')),
- ('n', '', None, _('ignored (DEPRECATED)')),
- ('k', 'keep', None, _("do not modify working copy during strip")),
- ('B', 'bookmark', '', _("remove revs only reachable from given"
- " bookmark"))],
- _('hg strip [-k] [-f] [-n] [-B bookmark] [-r] REV...'))
+ ('k', 'keep', None, _("do not modify working copy during strip"))],
+ _('hg strip [-k] [-f] [-n] REV...'))
def strip(ui, repo, *revs, **opts):
"""strip changesets and all their descendants from the repository
@@ -2944,10 +2707,6 @@ def strip(ui, repo, *revs, **opts):
Use the --no-backup option to discard the backup bundle once the
operation completes.
- Strip is not a history-rewriting operation and can be used on
- changesets in the public phase. But if the stripped changesets have
- been pushed to a remote repository you will likely pull them again.
-
Return 0 on success.
"""
backup = 'all'
@@ -2957,38 +2716,11 @@ def strip(ui, repo, *revs, **opts):
backup = 'none'
cl = repo.changelog
- revs = list(revs) + opts.get('rev')
revs = set(scmutil.revrange(repo, revs))
-
- if opts.get('bookmark'):
- mark = opts.get('bookmark')
- marks = repo._bookmarks
- if mark not in marks:
- raise util.Abort(_("bookmark '%s' not found") % mark)
-
- # If the requested bookmark is not the only one pointing to a
- # a revision we have to only delete the bookmark and not strip
- # anything. revsets cannot detect that case.
- uniquebm = True
- for m, n in marks.iteritems():
- if m != mark and n == repo[mark].node():
- uniquebm = False
- break
- if uniquebm:
- rsrevs = repo.revs("ancestors(bookmark(%s)) - "
- "ancestors(head() and not bookmark(%s)) - "
- "ancestors(bookmark() and not bookmark(%s))",
- mark, mark, mark)
- revs.update(set(rsrevs))
- if not revs:
- del marks[mark]
- repo._writebookmarks(mark)
- ui.write(_("bookmark '%s' deleted\n") % mark)
-
if not revs:
raise util.Abort(_('empty revision set'))
- descendants = set(cl.descendants(revs))
+ descendants = set(cl.descendants(*revs))
strippedrevs = revs.union(descendants)
roots = revs.difference(descendants)
@@ -3030,14 +2762,8 @@ def strip(ui, repo, *revs, **opts):
finally:
wlock.release()
- if opts.get('bookmark'):
- del marks[mark]
- repo._writebookmarks(marks)
- ui.write(_("bookmark '%s' deleted\n") % mark)
-
repo.mq.strip(repo, revs, backup=backup, update=update,
force=opts.get('force'))
-
return 0
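Note: the removed --bookmark mode used the revset shown above to find
changesets reachable only from the named bookmark, deleted the bookmark, and
stripped those revisions (or only deleted the bookmark when another one still
pointed at the same head). Usage per the removed option, with an illustrative
bookmark name::

    $ hg strip -B feature-x   # delete bookmark; strip revs only it kept alive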
@command("qselect",
@@ -3141,7 +2867,7 @@ def select(ui, repo, *args, **opts):
if i == 0:
q.pop(repo, all=True)
else:
- q.pop(repo, str(i - 1))
+ q.pop(repo, i - 1)
break
if popped:
try:
@@ -3183,22 +2909,12 @@ def finish(ui, repo, *revrange, **opts):
return 0
revs = scmutil.revrange(repo, revrange)
- if repo['.'].rev() in revs and repo[None].files():
- ui.warn(_('warning: uncommitted changes in the working directory\n'))
- # queue.finish may changes phases but leave the responsability to lock the
- # repo to the caller to avoid deadlock with wlock. This command code is
- # responsability for this locking.
- lock = repo.lock()
- try:
- q.finish(repo, revs)
- q.savedirty()
- finally:
- lock.release()
+ q.finish(repo, revs)
+ q.savedirty()
return 0
@command("qqueue",
[('l', 'list', False, _('list all available queues')),
- ('', 'active', False, _('print name of active queue')),
('c', 'create', False, _('create new queue')),
('', 'rename', False, _('rename active queue')),
('', 'delete', False, _('delete reference to queue')),
@@ -3213,8 +2929,7 @@ def qqueue(ui, repo, name=None, **opts):
Omitting a queue name or specifying -l/--list will show you the registered
queues - by default the "normal" patches queue is registered. The currently
- active queue will be marked with "(active)". Specifying --active will print
- only the name of the active queue.
+ active queue will be marked with "(active)".
To create a new queue, use -c/--create. The queue is automatically made
active, except in the case where there are applied patches from the
@@ -3226,7 +2941,9 @@ def qqueue(ui, repo, name=None, **opts):
Returns 0 on success.
'''
+
q = repo.mq
+
_defaultqueue = 'patches'
_allqueues = 'patches.queues'
_activequeue = 'patches.queue'
@@ -3305,11 +3022,8 @@ def qqueue(ui, repo, name=None, **opts):
fh.close()
util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
- if not name or opts.get('list') or opts.get('active'):
+ if not name or opts.get('list'):
current = _getcurrent()
- if opts.get('active'):
- ui.write('%s\n' % (current,))
- return
for queue in _getqueues():
ui.write('%s' % (queue,))
if queue == current and not ui.quiet:
@@ -3369,22 +3083,11 @@ def qqueue(ui, repo, name=None, **opts):
raise util.Abort(_('use --create to create a new queue'))
_setactive(name)
-def mqphasedefaults(repo, roots):
- """callback used to set mq changeset as secret when no phase data exists"""
- if repo.mq.applied:
- if repo.ui.configbool('mq', 'secret', False):
- mqphase = phases.secret
- else:
- mqphase = phases.draft
- qbase = repo[repo.mq.applied[0].node]
- roots[mqphase].add(qbase.node())
- return roots
-
def reposetup(ui, repo):
class mqrepo(repo.__class__):
@util.propertycache
def mq(self):
- return queue(self.ui, self.path)
+ return queue(self.ui, self.join(""))
def abortifwdirpatched(self, errmsg, force=False):
if self.mq.applied and not force:
@@ -3404,22 +3107,15 @@ def reposetup(ui, repo):
def checkpush(self, force, revs):
if self.mq.applied and not force:
- outapplied = [e.node for e in self.mq.applied]
+ haspatches = True
if revs:
- # Assume applied patches have no non-patch descendants and
- # are not on remote already. Filtering any changeset not
- # pushed.
- heads = set(revs)
- for node in reversed(outapplied):
- if node in heads:
- break
- else:
- outapplied.pop()
- # looking for pushed and shared changeset
- for node in outapplied:
- if repo[node].phase() < phases.secret:
- raise util.Abort(_('source has mq patches applied'))
- # no non-secret patches pushed
+ # Assume applied patches have no non-patch descendants
+ # and are not on remote already. If they appear in the
+ # set of resolved 'revs', bail out.
+ applied = set(e.node for e in self.mq.applied)
+ haspatches = bool([n for n in revs if n in applied])
+ if haspatches:
+ raise util.Abort(_('source has mq patches applied'))
super(mqrepo, self).checkpush(force, revs)
def _findtags(self):
@@ -3445,8 +3141,8 @@ def reposetup(ui, repo):
tags = result[0]
for patch in mqtags:
if patch[1] in tags:
- self.ui.warn(_('tag %s overrides mq patch of the same '
- 'name\n') % patch[1])
+ self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
+ % patch[1])
else:
tags[patch[1]] = patch[0]
@@ -3454,20 +3150,16 @@ def reposetup(ui, repo):
def _branchtags(self, partial, lrev):
q = self.mq
- cl = self.changelog
- qbase = None
if not q.applied:
- if getattr(self, '_committingpatch', False):
- # Committing a new patch, must be tip
- qbase = len(cl) - 1
- else:
- qbasenode = q.applied[0].node
- try:
- qbase = cl.rev(qbasenode)
- except error.LookupError:
- self.ui.warn(_('mq status file refers to unknown node %s\n')
- % short(qbasenode))
- if qbase is None:
+ return super(mqrepo, self)._branchtags(partial, lrev)
+
+ cl = self.changelog
+ qbasenode = q.applied[0].node
+ try:
+ qbase = cl.rev(qbasenode)
+ except error.LookupError:
+ self.ui.warn(_('mq status file refers to unknown node %s\n')
+ % short(qbasenode))
return super(mqrepo, self)._branchtags(partial, lrev)
start = lrev + 1
@@ -3490,10 +3182,8 @@ def reposetup(ui, repo):
if repo.local():
repo.__class__ = mqrepo
- repo._phasedefaults.append(mqphasedefaults)
-
def mqimport(orig, ui, repo, *args, **kwargs):
- if (util.safehasattr(repo, 'abortifwdirpatched')
+ if (hasattr(repo, 'abortifwdirpatched')
and not kwargs.get('no_commit', False)):
repo.abortifwdirpatched(_('cannot import over an applied patch'),
kwargs.get('force'))
@@ -3556,12 +3246,13 @@ def revsetmq(repo, subset, x):
applied = set([repo[r.node].rev() for r in repo.mq.applied])
return [r for r in subset if r in applied]
+def extsetup(ui):
+ revset.symbols['mq'] = revsetmq
+
# tell hggettext to extract docstrings from these functions:
i18nfunctions = [revsetmq]
-def extsetup(ui):
- # Ensure mq wrappers are called first, regardless of extension load order by
- # NOT wrapping in uisetup() and instead deferring to init stage two here.
+def uisetup(ui):
mqopt = [('', 'mq', None, _("operate on patch repository"))]
extensions.wrapcommand(commands.table, 'import', mqimport)
@@ -3586,7 +3277,6 @@ def extsetup(ui):
if extmodule.__file__ != __file__:
dotable(getattr(extmodule, 'cmdtable', {}))
- revset.symbols['mq'] = revsetmq
colortable = {'qguard.negative': 'red',
'qguard.positive': 'yellow',
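Note: with the mq() predicate registered in extsetup above, applied patches
can be selected directly in revsets, for example::

    $ hg log -r "mq()"          # list all applied mq patches
    $ hg log -r "not mq()"      # history excluding applied patches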
diff --git a/hgext/notify.py b/hgext/notify.py
index b0fbcee..5e96f11 100644
--- a/hgext/notify.py
+++ b/hgext/notify.py
@@ -5,135 +5,77 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
-'''hooks for sending email push notifications
+'''hooks for sending email notifications at commit/push time
-This extension implements hooks to send email notifications when
-changesets are sent from or received by the local repository.
+Subscriptions can be managed through a hgrc file. Default mode is to
+print messages to stdout, for testing and configuring.
-First, enable the extension as explained in :hg:`help extensions`, and
-register the hook you want to run. ``incoming`` and ``changegroup`` hooks
-are run when changesets are received, while ``outgoing`` hooks are for
-changesets sent to another repository::
+To use, configure the notify extension and enable it in hgrc like
+this::
+
+ [extensions]
+ notify =
[hooks]
# one email for each incoming changeset
incoming.notify = python:hgext.notify.hook
- # one email for all incoming changesets
+ # batch emails when many changesets incoming at one time
changegroup.notify = python:hgext.notify.hook
-
- # one email for all outgoing changesets
+ # batch emails when many changesets outgoing at one time (client side)
outgoing.notify = python:hgext.notify.hook
-This registers the hooks. To enable notification, subscribers must
-be assigned to repositories. The ``[usersubs]`` section maps multiple
-repositories to a given recipient. The ``[reposubs]`` section maps
-multiple recipients to a single repository::
+ [notify]
+ # config items go here
+
+Required configuration items::
+
+ config = /path/to/file # file containing subscriptions
+
+Optional configuration items::
+
+ test = True # print messages to stdout for testing
+ strip = 3 # number of slashes to strip for url paths
+ domain = example.com # domain to use if committer missing domain
+ style = ... # style file to use when formatting email
+ template = ... # template to use when formatting email
+ incoming = ... # template to use when run as incoming hook
+ outgoing = ... # template to use when run as outgoing hook
+ changegroup = ... # template to use when run as changegroup hook
+ maxdiff = 300 # max lines of diffs to include (0=none, -1=all)
+ maxsubject = 67 # truncate subject line longer than this
+ diffstat = True # add a diffstat before the diff content
+ sources = serve # notify if source of incoming changes in this list
+ # (serve == ssh or http, push, pull, bundle)
+ merge = False # send notification for merges (default True)
+ [email]
+ from = user@host.com # email address to send as if none given
+ [web]
+ baseurl = http://hgserver/... # root of hg web site for browsing commits
+
+The notify config file has same format as a regular hgrc file. It has
+two sections so you can express subscriptions in whatever way is
+handier for you.
+
+::
[usersubs]
- # key is subscriber email, value is a comma-separated list of repo glob
- # patterns
+ # key is subscriber email, value is ","-separated list of glob patterns
user@host = pattern
[reposubs]
- # key is glob pattern, value is a comma-separated list of subscriber
- # emails
+ # key is glob pattern, value is ","-separated list of subscriber emails
pattern = user@host
-Glob patterns are matched against absolute path to repository
-root.
-
-In order to place them under direct user management, ``[usersubs]`` and
-``[reposubs]`` sections may be placed in a separate ``hgrc`` file and
-incorporated by reference::
-
- [notify]
- config = /path/to/subscriptionsfile
-
-Notifications will not be sent until the ``notify.test`` value is set
-to ``False``; see below.
-
-Notifications content can be tweaked with the following configuration entries:
-
-notify.test
- If ``True``, print messages to stdout instead of sending them. Default: True.
-
-notify.sources
- Space-separated list of change sources. Notifications are activated only
- when a changeset's source is in this list. Sources may be:
-
- :``serve``: changesets received via http or ssh
- :``pull``: changesets received via ``hg pull``
- :``unbundle``: changesets received via ``hg unbundle``
- :``push``: changesets sent or received via ``hg push``
- :``bundle``: changesets sent via ``hg unbundle``
-
- Default: serve.
-
-notify.strip
- Number of leading slashes to strip from url paths. By default, notifications
- reference repositories with their absolute path. ``notify.strip`` lets you
- turn them into relative paths. For example, ``notify.strip=3`` will change
- ``/long/path/repository`` into ``repository``. Default: 0.
-
-notify.domain
- Default email domain for sender or recipients with no explicit domain.
-
-notify.style
- Style file to use when formatting emails.
-
-notify.template
- Template to use when formatting emails.
-
-notify.incoming
- Template to use when run as an incoming hook, overriding ``notify.template``.
-
-notify.outgoing
- Template to use when run as an outgoing hook, overriding ``notify.template``.
-
-notify.changegroup
- Template to use when running as a changegroup hook, overriding
- ``notify.template``.
-
-notify.maxdiff
- Maximum number of diff lines to include in notification email. Set to 0
- to disable the diff, or -1 to include all of it. Default: 300.
-
-notify.maxsubject
- Maximum number of characters in email's subject line. Default: 67.
-
-notify.diffstat
- Set to True to include a diffstat before diff content. Default: True.
-
-notify.merge
- If True, send notifications for merge changesets. Default: True.
-
-notify.mbox
- If set, append mails to this mbox file instead of sending. Default: None.
-
-notify.fromauthor
- If set, use the committer of the first changeset in a changegroup for
- the "From" field of the notification mail. If not set, take the user
- from the pushing repo. Default: False.
-
-If set, the following entries will also be used to customize the
-notifications:
-
-email.from
- Email ``From`` address to use if none can be found in the generated
- email content.
-
-web.baseurl
- Root repository URL to combine with repository paths when making
- references. See also ``notify.strip``.
+Glob patterns are matched against path to repository root.
+If you like, you can put notify config file in repository that users
+can push changes to, they can manage their own subscriptions.
'''
from mercurial.i18n import _
from mercurial import patch, cmdutil, templater, util, mail
import email.Parser, email.Errors, fnmatch, socket, time
-testedwith = 'internal'
-
# template for single changeset can include email headers.
single_template = '''
Subject: changeset in {webroot}: {desc|firstline|strip}
@@ -170,7 +112,6 @@ class notifier(object):
self.stripcount = int(self.ui.config('notify', 'strip', 0))
self.root = self.strip(self.repo.root)
self.domain = self.ui.config('notify', 'domain')
- self.mbox = self.ui.config('notify', 'mbox')
self.test = self.ui.configbool('notify', 'test', True)
self.charsets = mail._charsets(self.ui)
self.subs = self.subscribers()
@@ -226,6 +167,9 @@ class notifier(object):
return [mail.addressencode(self.ui, s, self.charsets, self.test)
for s in sorted(subs)]
+ def url(self, path=None):
+ return self.ui.config('web', 'baseurl') + (path or self.root)
+
def node(self, ctx, **props):
'''format one changeset, unless it is a suppressed merge.'''
if not self.merge and len(ctx.parents()) > 1:
@@ -303,7 +247,7 @@ class notifier(object):
self.ui.status(_('notify: sending %d subscribers %d changes\n') %
(len(self.subs), count))
mail.sendmail(self.ui, util.email(msg['From']),
- self.subs, msgtext, mbox=self.mbox)
+ self.subs, msgtext)
def diff(self, ctx, ref=None):
@@ -349,18 +293,15 @@ def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
ui.pushbuffer()
data = ''
count = 0
- author = ''
if hooktype == 'changegroup' or hooktype == 'outgoing':
start, end = ctx.rev(), len(repo)
for rev in xrange(start, end):
if n.node(repo[rev]):
count += 1
- if not author:
- author = repo[rev].user()
else:
data += ui.popbuffer()
- ui.note(_('notify: suppressing notification for merge %d:%s\n')
- % (rev, repo[rev].hex()[:12]))
+ ui.note(_('notify: suppressing notification for merge %d:%s\n') %
+ (rev, repo[rev].hex()[:12]))
ui.pushbuffer()
if count:
n.diff(ctx, repo['tip'])
@@ -374,9 +315,5 @@ def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
n.diff(ctx)
data += ui.popbuffer()
- fromauthor = ui.config('notify', 'fromauthor')
- if author and fromauthor:
- data = '\n'.join(['From: %s' % author, data])
-
if count:
n.send(ctx, count, data)
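Note: pulling the pieces of the restored notify docstring together, a minimal
working configuration might look like the following; paths and addresses are
placeholders::

    [extensions]
    notify =

    [hooks]
    # batch one email per incoming changegroup
    changegroup.notify = python:hgext.notify.hook

    [notify]
    test = False            # actually send mail instead of printing to stdout
    sources = serve push    # only notify for served or pushed changes
    maxdiff = 300

    [usersubs]
    # subscriber email = comma-separated glob patterns of repo paths
    dev@example.com = /srv/hg/project/**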
diff --git a/hgext/pager.py b/hgext/pager.py
index ae430ef..ccf0bd5 100644
--- a/hgext/pager.py
+++ b/hgext/pager.py
@@ -17,11 +17,17 @@
To set the pager that should be used, set the application variable::
[pager]
- pager = less -FRX
+ pager = less -FRSX
If no pager is set, the pager extensions uses the environment variable
$PAGER. If neither pager.pager, nor $PAGER is set, no pager is used.
+If you notice "BROKEN PIPE" error messages, you can disable them by
+setting::
+
+ [pager]
+ quiet = True
+
You can disable the pager for certain commands by adding them to the
pager.ignore list::
@@ -47,16 +53,14 @@ used. Use a boolean value like yes, no, on, off, or use auto for
normal behavior.
'''
-import atexit, sys, os, signal, subprocess
+import sys, os, signal, shlex, errno
from mercurial import commands, dispatch, util, extensions
from mercurial.i18n import _
-testedwith = 'internal'
-
-def _pagerfork(ui, p):
- if not util.safehasattr(os, 'fork'):
+def _runpager(p):
+ if not hasattr(os, 'fork'):
sys.stdout = util.popen(p, 'wb')
- if ui._isatty(sys.stderr):
+ if util.isatty(sys.stderr):
sys.stderr = sys.stdout
return
fdin, fdout = os.pipe()
@@ -64,7 +68,7 @@ def _pagerfork(ui, p):
if pid == 0:
os.close(fdin)
os.dup2(fdout, sys.stdout.fileno())
- if ui._isatty(sys.stderr):
+ if util.isatty(sys.stderr):
os.dup2(fdout, sys.stderr.fileno())
os.close(fdout)
return
@@ -81,35 +85,8 @@ def _pagerfork(ui, p):
else:
raise
-def _pagersubprocess(ui, p):
- pager = subprocess.Popen(p, shell=True, bufsize=-1,
- close_fds=util.closefds, stdin=subprocess.PIPE,
- stdout=sys.stdout, stderr=sys.stderr)
-
- stdout = os.dup(sys.stdout.fileno())
- stderr = os.dup(sys.stderr.fileno())
- os.dup2(pager.stdin.fileno(), sys.stdout.fileno())
- if ui._isatty(sys.stderr):
- os.dup2(pager.stdin.fileno(), sys.stderr.fileno())
-
- @atexit.register
- def killpager():
- pager.stdin.close()
- os.dup2(stdout, sys.stdout.fileno())
- os.dup2(stderr, sys.stderr.fileno())
- pager.wait()
-
-def _runpager(ui, p):
- # The subprocess module shipped with Python <= 2.4 is buggy (issue3533).
- # The compat version is buggy on Windows (issue3225), but has been shipping
- # with hg for a long time. Preserve existing functionality.
- if sys.version_info >= (2, 5):
- _pagersubprocess(ui, p)
- else:
- _pagerfork(ui, p)
-
def uisetup(ui):
- if '--debugger' in sys.argv or not ui.formatted():
+ if ui.plain() or '--debugger' in sys.argv or not util.isatty(sys.stdout):
return
def pagecmd(orig, ui, options, cmd, cmdfunc):
@@ -124,9 +101,9 @@ def uisetup(ui):
(cmd not in ui.configlist('pager', 'ignore') and not attend))):
ui.setconfig('ui', 'formatted', ui.formatted())
ui.setconfig('ui', 'interactive', False)
- if util.safehasattr(signal, "SIGPIPE"):
+ _runpager(p)
+ if ui.configbool('pager', 'quiet'):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
- _runpager(ui, p)
return orig(ui, options, cmd, cmdfunc)
extensions.wrapfunction(dispatch, '_runcommand', pagecmd)
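Note: a minimal pager setup combining the options mentioned in the restored
docstring; the ignore list is illustrative::

    [extensions]
    pager =

    [pager]
    pager = less -FRSX
    quiet = True                    # suppress "broken pipe" messages
    # never page these commands
    ignore = version, help, update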
diff --git a/hgext/patchbomb.py b/hgext/patchbomb.py
index 7ac8e27..dfc3cb0 100644
--- a/hgext/patchbomb.py
+++ b/hgext/patchbomb.py
@@ -45,28 +45,36 @@ directly from the commandline. See the [email] and [smtp] sections in
hgrc(5) for details.
'''
-import os, errno, socket, tempfile, cStringIO
+import os, errno, socket, tempfile, cStringIO, time
import email.MIMEMultipart, email.MIMEBase
import email.Utils, email.Encoders, email.Generator
-from mercurial import cmdutil, commands, hg, mail, patch, util
+from mercurial import cmdutil, commands, hg, mail, patch, util, discovery
from mercurial import scmutil
from mercurial.i18n import _
from mercurial.node import bin
cmdtable = {}
command = cmdutil.command(cmdtable)
-testedwith = 'internal'
def prompt(ui, prompt, default=None, rest=':'):
+ if not ui.interactive() and default is None:
+ raise util.Abort(_("%s Please enter a valid value" % (prompt + rest)))
if default:
prompt += ' [%s]' % default
- return ui.prompt(prompt + rest, default)
+ prompt += rest
+ while True:
+ r = ui.prompt(prompt, default=default)
+ if r:
+ return r
+ if default is not None:
+ return default
+ ui.warn(_('Please enter a valid value.\n'))
-def introwanted(opts, number):
- '''is an introductory message apparently wanted?'''
+def introneeded(opts, number):
+ '''is an introductory message required?'''
return number > 1 or opts.get('intro') or opts.get('desc')
-def makepatch(ui, repo, patchlines, opts, _charsets, idx, total, numbered,
+def makepatch(ui, repo, patchlines, opts, _charsets, idx, total,
patchname=None):
desc = []
@@ -85,7 +93,7 @@ def makepatch(ui, repo, patchlines, opts, _charsets, idx, total, numbered,
if not patchname and not node:
raise ValueError
- if opts.get('attach') and not opts.get('body'):
+ if opts.get('attach'):
body = ('\n'.join(desc[1:]).strip() or
'Patch subject is complete summary.')
body += '\n\n\n'
@@ -102,16 +110,11 @@ def makepatch(ui, repo, patchlines, opts, _charsets, idx, total, numbered,
if opts.get('diffstat'):
body += ds + '\n\n'
- addattachment = opts.get('attach') or opts.get('inline')
- if not addattachment or opts.get('body'):
- body += '\n'.join(patchlines)
-
- if addattachment:
+ if opts.get('attach') or opts.get('inline'):
msg = email.MIMEMultipart.MIMEMultipart()
if body:
msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
- p = mail.mimetextpatch('\n'.join(patchlines), 'x-patch',
- opts.get('test'))
+ p = mail.mimetextpatch('\n'.join(patchlines), 'x-patch', opts.get('test'))
binnode = bin(node)
# if node is mq patch, it will have the patch file's name as a tag
if not patchname:
@@ -121,8 +124,7 @@ def makepatch(ui, repo, patchlines, opts, _charsets, idx, total, numbered,
patchname = patchtags[0]
elif total > 1:
patchname = cmdutil.makefilename(repo, '%b-%n.patch',
- binnode, seqno=idx,
- total=total)
+ binnode, seqno=idx, total=total)
else:
patchname = cmdutil.makefilename(repo, '%b.patch', binnode)
disposition = 'inline'
@@ -131,6 +133,7 @@ def makepatch(ui, repo, patchlines, opts, _charsets, idx, total, numbered,
p['Content-Disposition'] = disposition + '; filename=' + patchname
msg.attach(p)
else:
+ body += '\n'.join(patchlines)
msg = mail.mimetextpatch(body, display=opts.get('test'))
flag = ' '.join(opts.get('flag'))
@@ -138,7 +141,7 @@ def makepatch(ui, repo, patchlines, opts, _charsets, idx, total, numbered,
flag = ' ' + flag
subj = desc[0].strip().rstrip('. ')
- if not numbered:
+ if not introneeded(opts, total):
subj = '[PATCH%s] %s' % (flag, opts.get('subject') or subj)
else:
tlen = len(str(total))
@@ -148,7 +151,6 @@ def makepatch(ui, repo, patchlines, opts, _charsets, idx, total, numbered,
return msg, subj, ds
emailopts = [
- ('', 'body', None, _('send patches as inline message text (default)')),
('a', 'attach', None, _('send patches as attachments')),
('i', 'inline', None, _('send patches as inline attachments')),
('', 'bcc', [], _('email addresses of blind carbon copy recipients')),
@@ -206,9 +208,7 @@ def patchbomb(ui, repo, *revs, **opts):
By default the patch is included as text in the email body for
easy reviewing. Using the -a/--attach option will instead create
an attachment for the patch. With -i/--inline an inline attachment
- will be created. You can include a patch both as text in the email
- body and as a regular or an inline attachment by combining the
- -a/--attach or -i/--inline with the --body option.
+ will be created.
With -o/--outgoing, emails will be generated for patches not found
in the destination repository (or only those which are ancestors
@@ -273,18 +273,18 @@ def patchbomb(ui, repo, *revs, **opts):
def getoutgoing(dest, revs):
'''Return the revisions present locally but not in dest'''
- url = ui.expandpath(dest or 'default-push', dest or 'default')
- url = hg.parseurl(url)[0]
- ui.status(_('comparing with %s\n') % util.hidepassword(url))
-
- revs = [r for r in scmutil.revrange(repo, revs) if r >= 0]
- if not revs:
- revs = [len(repo) - 1]
- revs = repo.revs('outgoing(%s) and ::%ld', dest or '', revs)
- if not revs:
+ dest = ui.expandpath(dest or 'default-push', dest or 'default')
+ dest, branches = hg.parseurl(dest)
+ revs, checkout = hg.addbranchrevs(repo, repo, branches, revs)
+ other = hg.peer(repo, opts, dest)
+ ui.status(_('comparing with %s\n') % util.hidepassword(dest))
+ common, _anyinc, _heads = discovery.findcommonincoming(repo, other)
+ nodes = revs and map(repo.lookup, revs) or revs
+ o = repo.changelog.findmissing(common, heads=nodes)
+ if not o:
ui.status(_("no changes found\n"))
return []
- return [str(r) for r in revs]
+ return [str(repo.changelog.rev(r)) for r in o]
def getpatches(revs):
for r in scmutil.revrange(repo, revs):
@@ -305,7 +305,7 @@ def patchbomb(ui, repo, *revs, **opts):
finally:
try:
os.unlink(tmpfn)
- except OSError:
+ except:
pass
os.rmdir(tmpdir)
@@ -352,66 +352,51 @@ def patchbomb(ui, repo, *revs, **opts):
ui.write(_('\nWrite the introductory message for the '
'patch series.\n\n'))
body = ui.edit(body, sender)
- # Save series description in case sendmail fails
+        # Save series description in case sendmail fails
msgfile = repo.opener('last-email.txt', 'wb')
msgfile.write(body)
msgfile.close()
return body
def getpatchmsgs(patches, patchnames=None):
+ jumbo = []
msgs = []
- ui.write(_('this patch series consists of %d patches.\n\n')
+ ui.write(_('This patch series consists of %d patches.\n\n')
% len(patches))
- # build the intro message, or skip it if the user declines
- if introwanted(opts, len(patches)):
- msg = makeintro(patches)
- if msg:
- msgs.append(msg)
-
- # are we going to send more than one message?
- numbered = len(msgs) + len(patches) > 1
-
- # now generate the actual patch messages
name = None
for i, p in enumerate(patches):
+ jumbo.extend(p)
if patchnames:
name = patchnames[i]
msg = makepatch(ui, repo, p, opts, _charsets, i + 1,
- len(patches), numbered, name)
+ len(patches), name)
msgs.append(msg)
- return msgs
-
- def makeintro(patches):
- tlen = len(str(len(patches)))
+ if introneeded(opts, len(patches)):
+ tlen = len(str(len(patches)))
- flag = opts.get('flag') or ''
- if flag:
- flag = ' ' + ' '.join(flag)
- prefix = '[PATCH %0*d of %d%s]' % (tlen, 0, len(patches), flag)
-
- subj = (opts.get('subject') or
- prompt(ui, '(optional) Subject: ', rest=prefix, default=''))
- if not subj:
- return None # skip intro if the user doesn't bother
+ flag = ' '.join(opts.get('flag'))
+ if flag:
+ subj = '[PATCH %0*d of %d %s]' % (tlen, 0, len(patches), flag)
+ else:
+ subj = '[PATCH %0*d of %d]' % (tlen, 0, len(patches))
+ subj += ' ' + (opts.get('subject') or
+ prompt(ui, 'Subject: ', rest=subj))
- subj = prefix + ' ' + subj
+ body = ''
+ ds = patch.diffstat(jumbo)
+ if ds and opts.get('diffstat'):
+ body = '\n' + ds
- body = ''
- if opts.get('diffstat'):
- # generate a cumulative diffstat of the whole patch series
- diffstat = patch.diffstat(sum(patches, []))
- body = '\n' + diffstat
- else:
- diffstat = None
+ body = getdescription(body, sender)
+ msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
+ msg['Subject'] = mail.headencode(ui, subj, _charsets,
+ opts.get('test'))
- body = getdescription(body, sender)
- msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
- msg['Subject'] = mail.headencode(ui, subj, _charsets,
- opts.get('test'))
- return (msg, subj, diffstat)
+ msgs.insert(0, (msg, subj, ds))
+ return msgs
def getbundlemsgs(bundle):
subj = (opts.get('subject')
@@ -444,33 +429,29 @@ def patchbomb(ui, repo, *revs, **opts):
showaddrs = []
- def getaddrs(header, ask=False, default=None):
- configkey = header.lower()
- opt = header.replace('-', '_').lower()
- addrs = opts.get(opt)
+ def getaddrs(opt, prpt=None, default=None):
+ addrs = opts.get(opt.replace('-', '_'))
+ if opt != 'reply-to':
+ showaddr = '%s:' % opt.capitalize()
+ else:
+ showaddr = 'Reply-To:'
+
if addrs:
- showaddrs.append('%s: %s' % (header, ', '.join(addrs)))
+ showaddrs.append('%s %s' % (showaddr, ', '.join(addrs)))
return mail.addrlistencode(ui, addrs, _charsets, opts.get('test'))
- # not on the command line: fallback to config and then maybe ask
- addr = (ui.config('email', configkey) or
- ui.config('patchbomb', configkey) or
- '')
- if not addr and ask:
- addr = prompt(ui, header, default=default)
- if addr:
- showaddrs.append('%s: %s' % (header, addr))
- return mail.addrlistencode(ui, [addr], _charsets, opts.get('test'))
- else:
- return default
+ addrs = ui.config('email', opt) or ui.config('patchbomb', opt) or ''
+ if not addrs and prpt:
+ addrs = prompt(ui, prpt, default)
+
+ if addrs:
+ showaddrs.append('%s %s' % (showaddr, addrs))
+ return mail.addrlistencode(ui, [addrs], _charsets, opts.get('test'))
- to = getaddrs('To', ask=True)
- if not to:
- # we can get here in non-interactive mode
- raise util.Abort(_('no recipient addresses provided'))
- cc = getaddrs('Cc', ask=True, default='') or []
- bcc = getaddrs('Bcc') or []
- replyto = getaddrs('Reply-To')
+ to = getaddrs('to', 'To')
+ cc = getaddrs('cc', 'Cc', '')
+ bcc = getaddrs('bcc')
+ replyto = getaddrs('reply-to')
if opts.get('diffstat') or opts.get('confirm'):
ui.write(_('\nFinal summary:\n\n'))
@@ -526,7 +507,7 @@ def patchbomb(ui, repo, *revs, **opts):
if replyto:
m['Reply-To'] = ', '.join(replyto)
if opts.get('test'):
- ui.status(_('displaying '), subj, ' ...\n')
+ ui.status(_('Displaying '), subj, ' ...\n')
ui.flush()
if 'PAGER' in os.environ and not ui.plain():
fp = util.popen(os.environ['PAGER'], 'w')
@@ -541,18 +522,30 @@ def patchbomb(ui, repo, *revs, **opts):
raise
if fp is not ui:
fp.close()
+ elif mbox:
+ ui.status(_('Writing '), subj, ' ...\n')
+ ui.progress(_('writing'), i, item=subj, total=len(msgs))
+ fp = open(mbox, 'In-Reply-To' in m and 'ab+' or 'wb+')
+ generator = email.Generator.Generator(fp, mangle_from_=True)
+ # Should be time.asctime(), but Windows prints 2-characters day
+ # of month instead of one. Make them print the same thing.
+ date = time.strftime('%a %b %d %H:%M:%S %Y',
+ time.localtime(start_time[0]))
+ fp.write('From %s %s\n' % (sender_addr, date))
+ generator.flatten(m, 0)
+ fp.write('\n\n')
+ fp.close()
else:
if not sendmail:
- sendmail = mail.connect(ui, mbox=mbox)
- ui.status(_('sending '), subj, ' ...\n')
+ sendmail = mail.connect(ui)
+ ui.status(_('Sending '), subj, ' ...\n')
ui.progress(_('sending'), i, item=subj, total=len(msgs))
- if not mbox:
- # Exim does not remove the Bcc field
- del m['Bcc']
+ # Exim does not remove the Bcc field
+ del m['Bcc']
fp = cStringIO.StringIO()
generator = email.Generator.Generator(fp, mangle_from_=False)
generator.flatten(m, 0)
- sendmail(sender_addr, to + bcc + cc, fp.getvalue())
+ sendmail(sender, to + bcc + cc, fp.getvalue())
ui.progress(_('writing'), None)
ui.progress(_('sending'), None)
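Note: the intro-subject numbering touched above pads the patch index to the
width of the series length. A small self-contained sketch of that formatting
(introsubject is a hypothetical name)::

    def introsubject(total, flag=''):
        # '[PATCH 00 of 12]': index zero-padded to the width of `total`
        tlen = len(str(total))
        if flag:
            flag = ' ' + flag
        return '[PATCH %0*d of %d%s]' % (tlen, 0, total, flag)

    assert introsubject(12) == '[PATCH 00 of 12]'
    assert introsubject(3, 'RFC') == '[PATCH 0 of 3 RFC]'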
diff --git a/hgext/progress.py b/hgext/progress.py
index 3cc3747..652fafe 100644
--- a/hgext/progress.py
+++ b/hgext/progress.py
@@ -2,8 +2,19 @@
#
# Copyright (C) 2010 Augie Fackler <durin42@gmail.com>
#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""show progress bars for some actions
@@ -16,9 +27,6 @@ The following settings are available::
[progress]
delay = 3 # number of seconds (float) before showing the progress bar
- changedelay = 1 # changedelay: minimum delay before showing a new topic.
- # If set to less than 3 * refresh, that value will
- # be used instead.
refresh = 0.1 # time in seconds between refreshes of the progress bar
format = topic bar number estimate # format of the progress bar
width = <none> # if set, the maximum width of the progress information
@@ -38,14 +46,14 @@ characters.
import sys
import time
+from mercurial import util
from mercurial.i18n import _
-testedwith = 'internal'
def spacejoin(*args):
return ' '.join(s for s in args if s)
def shouldprint(ui):
- return ui._isatty(sys.stderr) or ui.configbool('progress', 'assume-tty')
+ return (util.isatty(sys.stderr) or ui.configbool('progress', 'assume-tty'))
def fmtremaining(seconds):
if seconds < 60:
@@ -97,13 +105,9 @@ class progbar(object):
self.printed = False
self.lastprint = time.time() + float(self.ui.config(
'progress', 'delay', default=3))
- self.lasttopic = None
self.indetcount = 0
self.refresh = float(self.ui.config(
'progress', 'refresh', default=0.1))
- self.changedelay = max(3 * self.refresh,
- float(self.ui.config(
- 'progress', 'changedelay', default=1)))
self.order = self.ui.configlist(
'progress', 'format',
default=['topic', 'bar', 'number', 'estimate'])
@@ -180,7 +184,6 @@ class progbar(object):
else:
out = spacejoin(head, tail)
sys.stderr.write('\r' + out[:termwidth])
- self.lasttopic = topic
sys.stderr.flush()
def clear(self):
@@ -237,7 +240,7 @@ class progbar(object):
# truncate the list of topics assuming all topics within
# this one are also closed
if topic in self.topics:
- self.topics = self.topics[:self.topics.index(topic)]
+ self.topics = self.topics[:self.topics.index(topic)]
else:
if topic not in self.topics:
self.starttimes[topic] = now
@@ -245,36 +248,24 @@ class progbar(object):
self.topics.append(topic)
self.topicstates[topic] = pos, item, unit, total
if now - self.lastprint >= self.refresh and self.topics:
- if (self.lasttopic is None # first time we printed
- # not a topic change
- or topic == self.lasttopic
- # it's been long enough we should print anyway
- or now - self.lastprint >= self.changedelay):
- self.lastprint = now
- self.show(now, topic, *self.topicstates[topic])
-
-_singleton = None
+ self.lastprint = now
+ self.show(now, topic, *self.topicstates[topic])
def uisetup(ui):
- global _singleton
class progressui(ui.__class__):
_progbar = None
- def _quiet(self):
- return self.debugflag or self.quiet
-
def progress(self, *args, **opts):
- if not self._quiet():
- self._progbar.progress(*args, **opts)
+ self._progbar.progress(*args, **opts)
return super(progressui, self).progress(*args, **opts)
def write(self, *args, **opts):
- if not self._quiet() and self._progbar.printed:
+ if self._progbar.printed:
self._progbar.clear()
return super(progressui, self).write(*args, **opts)
def write_err(self, *args, **opts):
- if not self._quiet() and self._progbar.printed:
+ if self._progbar.printed:
self._progbar.clear()
return super(progressui, self).write_err(*args, **opts)
@@ -287,9 +278,7 @@ def uisetup(ui):
# we instantiate one globally shared progress bar to avoid
# competing progress bars when multiple UI objects get created
if not progressui._progbar:
- if _singleton is None:
- _singleton = progbar(ui)
- progressui._progbar = _singleton
+ progressui._progbar = progbar(ui)
def reposetup(ui, repo):
uisetup(repo.ui)
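Note: a sample configuration using only the settings from the docstring
above::

    [extensions]
    progress =

    [progress]
    delay = 3           # seconds before the bar first appears
    refresh = 0.1       # seconds between redraws
    format = topic bar number estimate
    assume-tty = False  # only draw when stderr is a terminal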
diff --git a/hgext/purge.py b/hgext/purge.py
index 8ec0da4..4179ea4 100644
--- a/hgext/purge.py
+++ b/hgext/purge.py
@@ -20,7 +20,8 @@
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses/>.
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
'''command to delete untracked files from the working directory'''
@@ -30,7 +31,6 @@ import os, stat
cmdtable = {}
command = cmdutil.command(cmdtable)
-testedwith = 'internal'
@command('purge|clean',
[('a', 'abort-on-err', None, _('abort if an error occurs')),
@@ -101,10 +101,10 @@ def purge(ui, repo, *dirs, **opts):
status = repo.status(match=match, ignored=opts['all'], unknown=True)
for f in sorted(status[4] + status[5]):
- ui.note(_('removing file %s\n') % f)
+ ui.note(_('Removing file %s\n') % f)
remove(removefile, f)
for f in sorted(directories, reverse=True):
if match(f) and not os.listdir(repo.wjoin(f)):
- ui.note(_('removing directory %s\n') % f)
+ ui.note(_('Removing directory %s\n') % f)
remove(os.rmdir, f)
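Note: as the status() call above shows, plain purge removes only unknown
files, while --all extends removal to ignored files::

    $ hg purge          # delete untracked (unknown) files
    $ hg purge --all    # also delete ignored files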
diff --git a/hgext/rebase.py b/hgext/rebase.py
index f276fcf..ad62f8a 100644
--- a/hgext/rebase.py
+++ b/hgext/rebase.py
@@ -15,7 +15,7 @@ http://mercurial.selenic.com/wiki/RebaseExtension
'''
from mercurial import hg, util, repair, merge, cmdutil, commands, bookmarks
-from mercurial import extensions, patch, scmutil, phases
+from mercurial import extensions, copies, patch
from mercurial.commands import templateopts
from mercurial.node import nullrev
from mercurial.lock import release
@@ -26,7 +26,6 @@ nullmerge = -2
cmdtable = {}
command = cmdutil.command(cmdtable)
-testedwith = 'internal'
@command('rebase',
[('s', 'source', '',
@@ -35,25 +34,23 @@ testedwith = 'internal'
_('rebase from the base of the specified changeset '
'(up to greatest common ancestor of base and dest)'),
_('REV')),
- ('r', 'rev', [],
- _('rebase these revisions'),
- _('REV')),
('d', 'dest', '',
_('rebase onto the specified changeset'), _('REV')),
('', 'collapse', False, _('collapse the rebased changesets')),
('m', 'message', '',
_('use text as collapse commit message'), _('TEXT')),
- ('e', 'edit', False, _('invoke editor on commit messages')),
('l', 'logfile', '',
_('read collapse commit message from file'), _('FILE')),
('', 'keep', False, _('keep original changesets')),
('', 'keepbranches', False, _('keep original branch names')),
- ('D', 'detach', False, _('(DEPRECATED)')),
+ ('', 'detach', False, _('force detaching of source from its original '
+ 'branch')),
('t', 'tool', '', _('specify merge tool')),
('c', 'continue', False, _('continue an interrupted rebase')),
('a', 'abort', False, _('abort an interrupted rebase'))] +
templateopts,
- _('[-s REV | -b REV] [-d REV] [OPTION]'))
+ _('hg rebase [-s REV | -b REV] [-d REV] [options]\n'
+ 'hg rebase {-a|-c}'))
def rebase(ui, repo, **opts):
"""move changeset (and descendants) to a different branch
@@ -108,20 +105,15 @@ def rebase(ui, repo, **opts):
skipped = set()
targetancestors = set()
- editor = None
- if opts.get('edit'):
- editor = cmdutil.commitforceeditor
-
lock = wlock = None
try:
- wlock = repo.wlock()
lock = repo.lock()
+ wlock = repo.wlock()
# Validate input and define rebasing points
destf = opts.get('dest', None)
srcf = opts.get('source', None)
basef = opts.get('base', None)
- revf = opts.get('rev', [])
contf = opts.get('continue')
abortf = opts.get('abort')
collapsef = opts.get('collapse', False)
@@ -129,6 +121,7 @@ def rebase(ui, repo, **opts):
extrafn = opts.get('extrafn') # internal, used by e.g. hgsubversion
keepf = opts.get('keep', False)
keepbranchesf = opts.get('keepbranches', False)
+ detachf = opts.get('detach', False)
# keepopen is not meant for use on the command line, but by
# other extensions
keepopen = opts.get('keepopen', False)
@@ -143,6 +136,8 @@ def rebase(ui, repo, **opts):
if collapsef:
raise util.Abort(
_('cannot use collapse with continue or abort'))
+ if detachf:
+ raise util.Abort(_('cannot use detach with continue or abort'))
if srcf or basef or destf:
raise util.Abort(
_('abort and continue do not allow specifying revisions'))
@@ -156,56 +151,16 @@ def rebase(ui, repo, **opts):
else:
if srcf and basef:
raise util.Abort(_('cannot specify both a '
- 'source and a base'))
- if revf and basef:
- raise util.Abort(_('cannot specify both a '
'revision and a base'))
- if revf and srcf:
- raise util.Abort(_('cannot specify both a '
- 'revision and a source'))
+ if detachf:
+ if not srcf:
+ raise util.Abort(
+ _('detach requires a revision to be specified'))
+ if basef:
+ raise util.Abort(_('cannot specify a base with detach'))
cmdutil.bailifchanged(repo)
-
- if not destf:
- # Destination defaults to the latest revision in the
- # current branch
- branch = repo[None].branch()
- dest = repo[branch]
- else:
- dest = scmutil.revsingle(repo, destf)
-
- if revf:
- rebaseset = repo.revs('%lr', revf)
- elif srcf:
- src = scmutil.revrange(repo, [srcf])
- rebaseset = repo.revs('(%ld)::', src)
- else:
- base = scmutil.revrange(repo, [basef or '.'])
- rebaseset = repo.revs(
- '(children(ancestor(%ld, %d)) and ::(%ld))::',
- base, dest, base)
-
- if rebaseset:
- root = min(rebaseset)
- else:
- root = None
-
- if not rebaseset:
- repo.ui.debug('base is ancestor of destination\n')
- result = None
- elif not keepf and list(repo.revs('first(children(%ld) - %ld)',
- rebaseset, rebaseset)):
- raise util.Abort(
- _("can't remove original changesets with"
- " unrebased descendants"),
- hint=_('use --keep to keep original changesets'))
- elif not keepf and not repo[root].mutable():
- raise util.Abort(_("can't rebase immutable changeset %s")
- % repo[root],
- hint=_('see hg help phases for details'))
- else:
- result = buildstate(repo, dest, rebaseset, collapsef)
-
+ result = buildstate(repo, destf, srcf, basef, detachf)
if not result:
# Empty state built, nothing to rebase
ui.status(_('nothing to rebase\n'))
@@ -213,8 +168,7 @@ def rebase(ui, repo, **opts):
else:
originalwd, target, state = result
if collapsef:
- targetancestors = set(repo.changelog.ancestors([target]))
- targetancestors.add(target)
+ targetancestors = set(repo.changelog.ancestors(target))
external = checkexternal(repo, state, targetancestors)
if keepbranchesf:
@@ -232,14 +186,11 @@ def rebase(ui, repo, **opts):
# Rebase
if not targetancestors:
- targetancestors = set(repo.changelog.ancestors([target]))
+ targetancestors = set(repo.changelog.ancestors(target))
targetancestors.add(target)
# Keep track of the current bookmarks in order to reset them later
currentbookmarks = repo._bookmarks.copy()
- activebookmark = repo._bookmarkcurrent
- if activebookmark:
- bookmarks.unsetcurrent(repo)
sortedstate = sorted(state)
total = len(sortedstate)
@@ -258,19 +209,18 @@ def rebase(ui, repo, **opts):
else:
try:
ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
- stats = rebasenode(repo, rev, p1, state, collapsef)
+ stats = rebasenode(repo, rev, p1, state)
if stats and stats[3] > 0:
raise util.Abort(_('unresolved conflicts (see hg '
'resolve, then hg rebase --continue)'))
finally:
ui.setconfig('ui', 'forcemerge', '')
- cmdutil.duplicatecopies(repo, rev, target)
+ updatedirstate(repo, rev, target, p2)
if not collapsef:
- newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn,
- editor=editor)
+ newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn)
else:
# Skip commit if we are collapsing
- repo.setparents(repo[p1].node())
+ repo.dirstate.setparents(repo[p1].node())
newrev = None
# Update the state
if newrev is not None:
@@ -297,7 +247,7 @@ def rebase(ui, repo, **opts):
commitmsg += '\n* %s' % repo[rebased].description()
commitmsg = ui.edit(commitmsg, repo.ui.username())
newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
- extrafn=extrafn, editor=editor)
+ extrafn=extrafn)
if 'qtip' in repo.tags():
updatemq(repo, state, skipped, **opts)
@@ -313,7 +263,7 @@ def rebase(ui, repo, **opts):
# Remove no more useful revisions
rebased = [rev for rev in state if state[rev] != nullmerge]
if rebased:
- if set(repo.changelog.descendants([min(rebased)])) - set(state):
+ if set(repo.changelog.descendants(min(rebased))) - set(state):
ui.warn(_("warning: new changesets detected "
"on source branch, not stripping\n"))
else:
@@ -329,11 +279,6 @@ def rebase(ui, repo, **opts):
util.unlinkpath(repo.sjoin('undo'))
if skipped:
ui.note(_("%d revisions have been skipped\n") % len(skipped))
-
- if (activebookmark and
- repo['tip'].node() == repo._bookmarks[activebookmark]):
- bookmarks.setcurrent(repo, activebookmark)
-
finally:
release(lock, wlock)
@@ -356,10 +301,24 @@ def checkexternal(repo, state, targetancestors):
external = p.rev()
return external
-def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None):
+def updatedirstate(repo, rev, p1, p2):
+ """Keep track of renamed files in the revision that is going to be rebased
+ """
+ # Here we simulate the copies and renames in the source changeset
+ cop, diver = copies.copies(repo, repo[rev], repo[p1], repo[p2], True)
+ m1 = repo[rev].manifest()
+ m2 = repo[p1].manifest()
+ for k, v in cop.iteritems():
+ if k in m1:
+ if v in m1 or v in m2:
+ repo.dirstate.copy(v, k)
+ if v in m2 and v not in m1 and k in m2:
+ repo.dirstate.remove(v)
+
+def concludenode(repo, rev, p1, p2, commitmsg=None, extrafn=None):
'Commit the changes and store useful information in extra'
try:
- repo.setparents(repo[p1].node(), repo[p2].node())
+ repo.dirstate.setparents(repo[p1].node(), repo[p2].node())
ctx = repo[rev]
if commitmsg is None:
commitmsg = ctx.description()
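Note: the updatedirstate() added in the hunk above replays the renames of the rebased revision into the dirstate. The same bookkeeping, sketched over plain dicts and callbacks (copies.copies() reports destination -> source; markcopy/markremove stand in for repo.dirstate.copy and repo.dirstate.remove):

    def replaycopies(copied, m1, m2, markcopy, markremove):
        """copied: {dest: source}; m1/m2: manifests of the rebased
        revision and of its new parent."""
        for dest, source in copied.items():
            if dest in m1:
                if source in m1 or source in m2:
                    markcopy(source, dest)
                # A rename: the old name survives only in the parent,
                # so schedule its removal.
                if source in m2 and source not in m1 and dest in m2:
                    markremove(source)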
@@ -368,20 +327,15 @@ def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None):
extrafn(ctx, extra)
# Commit might fail if unresolved files exist
newrev = repo.commit(text=commitmsg, user=ctx.user(),
- date=ctx.date(), extra=extra, editor=editor)
+ date=ctx.date(), extra=extra)
repo.dirstate.setbranch(repo[newrev].branch())
- targetphase = max(ctx.phase(), phases.draft)
- # retractboundary doesn't overwrite upper phase inherited from parent
- newnode = repo[newrev].node()
- if newnode:
- phases.retractboundary(repo, targetphase, [newnode])
return newrev
except util.Abort:
# Invalidate the previous setparents
repo.dirstate.invalidate()
raise
-def rebasenode(repo, rev, p1, state, collapse):
+def rebasenode(repo, rev, p1, state):
'Rebase a single revision'
# Merge phase
# Update to target and merge it with local
@@ -395,9 +349,7 @@ def rebasenode(repo, rev, p1, state, collapse):
base = None
if repo[rev].rev() != repo[min(state)].rev():
base = repo[rev].p1().node()
- # When collapsing in-place, the parent is the common ancestor, we
- # have to allow merging with it.
- return merge.update(repo, rev, True, True, False, base, collapse)
+ return merge.update(repo, rev, True, True, False, base)
def defineparents(repo, rev, target, state, targetancestors):
'Return the new parent relationship of the revision that will be rebased'
@@ -446,7 +398,6 @@ def updatemq(repo, state, skipped, **opts):
mqrebase = {}
mq = repo.mq
original_series = mq.fullseries[:]
- skippedpatches = set()
for p in mq.applied:
rev = repo[p.node].rev()
@@ -454,9 +405,6 @@ def updatemq(repo, state, skipped, **opts):
repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' %
(rev, p.name))
mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
- else:
- # Applied but not rebased, not sure this should happen
- skippedpatches.add(p.name)
if mqrebase:
mq.finish(repo, mqrebase.keys())
@@ -468,26 +416,21 @@ def updatemq(repo, state, skipped, **opts):
repo.ui.debug('import mq patch %d (%s)\n' % (state[rev], name))
mq.qimport(repo, (), patchname=name, git=isgit,
rev=[str(state[rev])])
- else:
- # Rebased and skipped
- skippedpatches.add(mqrebase[rev][0])
-
- # Patches were either applied and rebased and imported in
- # order, applied and removed or unapplied. Discard the removed
- # ones while preserving the original series order and guards.
- newseries = [s for s in original_series
- if mq.guard_re.split(s, 1)[0] not in skippedpatches]
- mq.fullseries[:] = newseries
- mq.seriesdirty = True
+
+ # restore old series to preserve guards
+ mq.fullseries = original_series
+ mq.series_dirty = True
mq.savedirty()
def updatebookmarks(repo, nstate, originalbookmarks, **opts):
'Move bookmarks to their correct changesets'
+ current = repo._bookmarkcurrent
for k, v in originalbookmarks.iteritems():
if v in nstate:
if nstate[v] != nullmerge:
- # update the bookmarks for revs that have moved
- repo._bookmarks[k] = nstate[v]
+ # reset the pointer if the bookmark was moved incorrectly
+ if k != current:
+ repo._bookmarks[k] = nstate[v]
bookmarks.write(repo)
@@ -503,10 +446,7 @@ def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches,
f.write('%d\n' % int(keepbranches))
for d, v in state.iteritems():
oldrev = repo[d].hex()
- if v != nullmerge:
- newrev = repo[v].hex()
- else:
- newrev = v
+ newrev = repo[v].hex()
f.write("%s:%s\n" % (oldrev, newrev))
f.close()
repo.ui.debug('rebase status stored\n')
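Note: the storestatus hunk above and the restorestatus hunk below drop the nullmerge special case from the 'oldrev:newrev' state lines. A sketch of the parsing as it stood before this revert (NULLMERGE = -2 is the sentinel defined near the top of the file; torev is a stand-in for looking a hex node up in the repo):

    NULLMERGE = -2

    def parsestateline(line, torev):
        """A literal '-2' on the right-hand side marks a detached
        ancestor, not a changeset, and must not be node-looked-up."""
        oldrev, newrev = line.strip().split(':')
        if newrev != str(NULLMERGE):
            return torev(oldrev), torev(newrev)
        return torev(oldrev), int(newrev)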
@@ -539,10 +479,7 @@ def restorestatus(repo):
keepbranches = bool(int(l))
else:
oldrev, newrev = l.split(':')
- if newrev != str(nullmerge):
- state[repo[oldrev].rev()] = repo[newrev].rev()
- else:
- state[repo[oldrev].rev()] = int(newrev)
+ state[repo[oldrev].rev()] = repo[newrev].rev()
skipped = set()
# recompute the set of skipped revs
if not collapse:
@@ -562,19 +499,9 @@ def restorestatus(repo):
def abort(repo, originalwd, target, state):
'Restore the repository to its original state'
- dstates = [s for s in state.values() if s != nullrev]
- immutable = [d for d in dstates if not repo[d].mutable()]
- if immutable:
- raise util.Abort(_("can't abort rebase due to immutable changesets %s")
- % ', '.join(str(repo[r]) for r in immutable),
- hint=_('see hg help phases for details'))
-
- descendants = set()
- if dstates:
- descendants = set(repo.changelog.descendants(dstates))
- if descendants - set(dstates):
+ if set(repo.changelog.descendants(target)) - set(state.values()):
repo.ui.warn(_("warning: new changesets detected on target branch, "
- "can't abort\n"))
+ "can't abort\n"))
return -1
else:
# Strip from the first rebased revision
@@ -588,81 +515,68 @@ def abort(repo, originalwd, target, state):
repo.ui.warn(_('rebase aborted\n'))
return 0
-def buildstate(repo, dest, rebaseset, collapse):
- '''Define which revisions are going to be rebased and where
+def buildstate(repo, dest, src, base, detach):
+ 'Define which revisions are going to be rebased and where'
+ targetancestors = set()
+ detachset = set()
- repo: repo
- dest: context
- rebaseset: set of rev
- '''
+ if not dest:
+ # Destination defaults to the latest revision in the current branch
+ branch = repo[None].branch()
+ dest = repo[branch].rev()
+ else:
+ dest = repo[dest].rev()
# This check isn't strictly necessary, since mq detects commits over an
# applied patch. But it prevents messing up the working directory when
# a partially completed rebase is blocked by mq.
- if 'qtip' in repo.tags() and (dest.node() in
+ if 'qtip' in repo.tags() and (repo[dest].node() in
[s.node for s in repo.mq.applied]):
raise util.Abort(_('cannot rebase onto an applied mq patch'))
- roots = list(repo.set('roots(%ld)', rebaseset))
- if not roots:
- raise util.Abort(_('no matching revisions'))
- if len(roots) > 1:
- raise util.Abort(_("can't rebase multiple roots"))
- root = roots[0]
-
- commonbase = root.ancestor(dest)
- if commonbase == root:
- raise util.Abort(_('source is ancestor of destination'))
- if commonbase == dest:
- samebranch = root.branch() == dest.branch()
- if not collapse and samebranch and root in dest.children():
- repo.ui.debug('source is a child of destination\n')
+ if src:
+ commonbase = repo[src].ancestor(repo[dest])
+ samebranch = repo[src].branch() == repo[dest].branch()
+ if commonbase == repo[src]:
+ raise util.Abort(_('source is ancestor of destination'))
+ if samebranch and commonbase == repo[dest]:
+ raise util.Abort(_('source is descendant of destination'))
+ source = repo[src].rev()
+ if detach:
+ # We need to keep track of source's ancestors up to the common base
+ srcancestors = set(repo.changelog.ancestors(source))
+ baseancestors = set(repo.changelog.ancestors(commonbase.rev()))
+ detachset = srcancestors - baseancestors
+ detachset.discard(commonbase.rev())
+ else:
+ if base:
+ cwd = repo[base].rev()
+ else:
+ cwd = repo['.'].rev()
+
+ if cwd == dest:
+ repo.ui.debug('source and destination are the same\n')
+ return None
+
+ targetancestors = set(repo.changelog.ancestors(dest))
+ if cwd in targetancestors:
+ repo.ui.debug('source is ancestor of destination\n')
+ return None
+
+ cwdancestors = set(repo.changelog.ancestors(cwd))
+ if dest in cwdancestors:
+ repo.ui.debug('source is descendant of destination\n')
return None
- repo.ui.debug('rebase onto %d starting from %d\n' % (dest, root))
- state = dict.fromkeys(rebaseset, nullrev)
- # Rebase tries to turn <dest> into a parent of <root> while
- # preserving the number of parents of rebased changesets:
- #
- # - A changeset with a single parent will always be rebased as a
- # changeset with a single parent.
- #
- # - A merge will be rebased as merge unless its parents are both
- # ancestors of <dest> or are themselves in the rebased set and
- # pruned while rebased.
- #
- # If one parent of <root> is an ancestor of <dest>, the rebased
- # version of this parent will be <dest>. This is always true with
- # --base option.
- #
- # Otherwise, we need to *replace* the original parents with
- # <dest>. This "detaches" the rebased set from its former location
- # and rebases it onto <dest>. Changes introduced by ancestors of
- # <root> not common with <dest> (the detachset, marked as
- # nullmerge) are "removed" from the rebased changesets.
- #
- # - If <root> has a single parent, set it to <dest>.
- #
- # - If <root> is a merge, we cannot decide which parent to
- # replace, the rebase operation is not clearly defined.
- #
- # The table below sums up this behavior:
- #
- # +--------------------+----------------------+-------------------------+
- # | | one parent | merge |
- # +--------------------+----------------------+-------------------------+
- # | parent in ::<dest> | new parent is <dest> | parents in ::<dest> are |
- # | | | remapped to <dest> |
- # +--------------------+----------------------+-------------------------+
- # | unrelated source | new parent is <dest> | ambiguous, abort |
- # +--------------------+----------------------+-------------------------+
- #
- # The actual abort is handled by `defineparents`
- if len(root.parents()) <= 1:
- # (strict) ancestors of <root> not ancestors of <dest>
- detachset = repo.revs('::%d - ::%d - %d', root, commonbase, root)
- state.update(dict.fromkeys(detachset, nullmerge))
- return repo['.'].rev(), dest.rev(), state
+ cwdancestors.add(cwd)
+ rebasingbranch = cwdancestors - targetancestors
+ source = min(rebasingbranch)
+
+ repo.ui.debug('rebase onto %d starting from %d\n' % (dest, source))
+ state = dict.fromkeys(repo.changelog.descendants(source), nullrev)
+ state.update(dict.fromkeys(detachset, nullmerge))
+ state[source] = nullrev
+ return repo['.'].rev(), repo[dest].rev(), state
def pullrebase(orig, ui, repo, *args, **opts):
'Call rebase after pull if the latter has been invoked with --rebase'
@@ -672,7 +586,6 @@ def pullrebase(orig, ui, repo, *args, **opts):
ui.debug('--update and --rebase are not compatible, ignoring '
'the update flag\n')
- movemarkfrom = repo['.'].node()
cmdutil.bailifchanged(repo)
revsprepull = len(repo)
origpostincoming = commands.postincoming
@@ -691,9 +604,6 @@ def pullrebase(orig, ui, repo, *args, **opts):
if dest != repo['.'].rev():
# there was nothing to rebase we force an update
hg.update(repo, dest)
- if bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
- ui.status(_("updating bookmark %s\n")
- % repo._bookmarkcurrent)
else:
if opts.get('tool'):
raise util.Abort(_('--tool can only be used with --rebase'))
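Note: in the reverted buildstate() above, --detach marks the ancestors the source would otherwise drag along. The set arithmetic, sketched over a plain ancestors(rev) -> set function that, like changelog.ancestors, excludes its argument:

    def computedetachset(ancestors, source, commonbase):
        """Revisions to mark nullmerge: everything the source
        inherits that the destination does not share."""
        srcancestors = set(ancestors(source))
        baseancestors = set(ancestors(commonbase))
        dset = srcancestors - baseancestors
        dset.discard(commonbase)  # ancestors() excludes its argument
        return dset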
diff --git a/hgext/record.py b/hgext/record.py
index ec9e384..1bbfb11 100644
--- a/hgext/record.py
+++ b/hgext/record.py
@@ -14,7 +14,6 @@ import copy, cStringIO, errno, os, re, shutil, tempfile
cmdtable = {}
command = cmdutil.command(cmdtable)
-testedwith = 'internal'
lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
@@ -262,7 +261,7 @@ def parsepatch(fp):
def filterpatch(ui, headers):
"""Interactively filter patch chunks into applied-only chunks"""
- def prompt(skipfile, skipall, query, chunk):
+ def prompt(skipfile, skipall, query):
"""prompt query, and process base inputs
- y/n for the rest of file
@@ -272,16 +271,14 @@ def filterpatch(ui, headers):
Return True/False and possibly updated skipfile and skipall.
"""
- newpatches = None
if skipall is not None:
- return skipall, skipfile, skipall, newpatches
+ return skipall, skipfile, skipall
if skipfile is not None:
- return skipfile, skipfile, skipall, newpatches
+ return skipfile, skipfile, skipall
while True:
- resps = _('[Ynesfdaq?]')
+ resps = _('[Ynsfdaq?]')
choices = (_('&Yes, record this change'),
_('&No, skip this change'),
- _('&Edit the change manually'),
_('&Skip remaining changes to this file'),
_('Record remaining changes to this &file'),
_('&Done, skip remaining changes and files'),
@@ -290,7 +287,7 @@ def filterpatch(ui, headers):
_('&?'))
r = ui.promptchoice("%s %s" % (query, resps), choices)
ui.write("\n")
- if r == 8: # ?
+ if r == 7: # ?
doc = gettext(record.__doc__)
c = doc.find('::') + 2
for l in doc[c:].splitlines():
@@ -301,70 +298,17 @@ def filterpatch(ui, headers):
ret = True
elif r == 1: # no
ret = False
- elif r == 2: # Edit patch
- if chunk is None:
- ui.write(_('cannot edit patch for whole file'))
- ui.write("\n")
- continue
- if chunk.header.binary():
- ui.write(_('cannot edit patch for binary file'))
- ui.write("\n")
- continue
- # Patch comment based on the Git one (based on comment at end of
- # http://mercurial.selenic.com/wiki/RecordExtension)
- phelp = '---' + _("""
-To remove '-' lines, make them ' ' lines (context).
-To remove '+' lines, delete them.
-Lines starting with # will be removed from the patch.
-
-If the patch applies cleanly, the edited hunk will immediately be
-added to the record list. If it does not apply cleanly, a rejects
-file will be generated: you can use that when you try again. If
-all lines of the hunk are removed, then the edit is aborted and
-the hunk is left unchanged.
-""")
- (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
- suffix=".diff", text=True)
- ncpatchfp = None
- try:
- # Write the initial patch
- f = os.fdopen(patchfd, "w")
- chunk.header.write(f)
- chunk.write(f)
- f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
- f.close()
- # Start the editor and wait for it to complete
- editor = ui.geteditor()
- util.system("%s \"%s\"" % (editor, patchfn),
- environ={'HGUSER': ui.username()},
- onerr=util.Abort, errprefix=_("edit failed"),
- out=ui.fout)
- # Remove comment lines
- patchfp = open(patchfn)
- ncpatchfp = cStringIO.StringIO()
- for line in patchfp:
- if not line.startswith('#'):
- ncpatchfp.write(line)
- patchfp.close()
- ncpatchfp.seek(0)
- newpatches = parsepatch(ncpatchfp)
- finally:
- os.unlink(patchfn)
- del ncpatchfp
- # Signal that the chunk shouldn't be applied as-is, but
- # provide the new patch to be used instead.
- ret = False
- elif r == 3: # Skip
+ elif r == 2: # Skip
ret = skipfile = False
- elif r == 4: # file (Record remaining)
+ elif r == 3: # file (Record remaining)
ret = skipfile = True
- elif r == 5: # done, skip remaining
+ elif r == 4: # done, skip remaining
ret = skipall = False
- elif r == 6: # all
+ elif r == 5: # all
ret = skipall = True
- elif r == 7: # quit
+ elif r == 6: # quit
raise util.Abort(_('user quit'))
- return ret, skipfile, skipall, newpatches
+ return ret, skipfile, skipall
seen = set()
applied = {} # 'filename' -> [] of chunks
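Note: the large removal above takes out interactive hunk editing. Its core is a temp-file round trip; a self-contained sketch of that flow (editorcmd is a caller-supplied command line here, whereas the real code asks ui.geteditor()):

    import os, tempfile

    def editpatchtext(text, editorcmd):
        """Write the hunk to a temp file, run the editor on it, then
        strip '#' comment lines before the result is reparsed."""
        fd, path = tempfile.mkstemp(prefix='hg-editor-',
                                    suffix='.diff', text=True)
        try:
            with os.fdopen(fd, 'w') as f:
                f.write(text)
            if os.system('%s "%s"' % (editorcmd, path)) != 0:
                raise RuntimeError('edit failed')
            with open(path) as f:
                return ''.join(l for l in f if not l.startswith('#'))
        finally:
            os.unlink(path)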
@@ -381,8 +325,8 @@ the hunk is left unchanged.
if skipall is None:
h.pretty(ui)
msg = (_('examine changes to %s?') %
- _(' and ').join("'%s'" % f for f in h.files()))
- r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
+ _(' and ').join(map(repr, h.files())))
+ r, skipfile, skipall = prompt(skipfile, skipall, msg)
if not r:
continue
applied[h.filename()] = [h]
@@ -398,19 +342,12 @@ the hunk is left unchanged.
idx = pos - len(h.hunks) + i
msg = _('record change %d/%d to %r?') % (idx, total,
chunk.filename())
- r, skipfile, skipall, newpatches = prompt(skipfile,
- skipall, msg, chunk)
+ r, skipfile, skipall = prompt(skipfile, skipall, msg)
if r:
if fixoffset:
chunk = copy.copy(chunk)
chunk.toline += fixoffset
applied[chunk.filename()].append(chunk)
- elif newpatches is not None:
- for newpatch in newpatches:
- for newhunk in newpatch.hunks:
- if fixoffset:
- newhunk.toline += fixoffset
- applied[newhunk.filename()].append(newhunk)
else:
fixoffset += chunk.removed - chunk.added
return sum([h for h in applied.itervalues()
@@ -435,7 +372,6 @@ def record(ui, repo, *pats, **opts):
y - record this change
n - skip this change
- e - edit this change manually
s - skip remaining changes to this file
f - record remaining changes to this file
@@ -517,11 +453,10 @@ def dorecord(ui, repo, commitfunc, cmdsuggest, backupall, *pats, **opts):
'(use "hg commit" instead)'))
changes = repo.status(match=match)[:3]
- diffopts = mdiff.diffopts(
- git=True, nodates=True,
- ignorews=opts.get('ignore_all_space'),
- ignorewsamount=opts.get('ignore_space_change'),
- ignoreblanklines=opts.get('ignore_blank_lines'))
+ diffopts = mdiff.diffopts(git=True, nodates=True,
+ ignorews=opts.get('ignore_all_space'),
+ ignorewsamount=opts.get('ignore_space_change'),
+ ignoreblanklines=opts.get('ignore_blank_lines'))
chunks = patch.diff(repo, changes=changes, opts=diffopts)
fp = cStringIO.StringIO()
fp.write(''.join(chunks))
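Note: most of the churn in filterpatch() above is renumbering: the prompt dispatches on the answer's position, so removing the 'e' choice shifts every later index down by one. Keying on the response letter instead would sidestep that, as in this small sketch:

    RESPONSES = 'ynsfdaq?'

    def choiceindex(answer):
        """Map a one-letter answer to an index that stays stable no
        matter which choices a given build of the prompt offers."""
        return RESPONSES.index(answer)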
diff --git a/hgext/relink.py b/hgext/relink.py
index f2e6bf1..f4d8f09 100644
--- a/hgext/relink.py
+++ b/hgext/relink.py
@@ -11,8 +11,6 @@ from mercurial import hg, util
from mercurial.i18n import _
import os, stat
-testedwith = 'internal'
-
def relink(ui, repo, origin=None, **opts):
"""recreate hardlinks between two repositories
@@ -38,11 +36,12 @@ def relink(ui, repo, origin=None, **opts):
command is running. (Both repositories will be locked against
writes.)
"""
- if (not util.safehasattr(util, 'samefile') or
- not util.safehasattr(util, 'samedevice')):
+ if not hasattr(util, 'samefile') or not hasattr(util, 'samedevice'):
raise util.Abort(_('hardlinks are not supported on this system'))
src = hg.repository(ui, ui.expandpath(origin or 'default-relink',
origin or 'default'))
+ if not src.local():
+ raise util.Abort(_('must specify local origin repository'))
ui.status(_('relinking %s to %s\n') % (src.store.path, repo.store.path))
if repo.root == src.root:
ui.status(_('there is nothing to relink\n'))
@@ -79,7 +78,7 @@ def collect(src, ui):
dirnames.sort()
relpath = dirpath[len(src) + seplen:]
for filename in sorted(filenames):
- if filename[-2:] not in ('.d', '.i'):
+ if not filename[-2:] in ('.d', '.i'):
continue
st = os.stat(os.path.join(dirpath, filename))
if not stat.S_ISREG(st.st_mode):
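Note: the relink hunk above only rewrites the membership test; the walk itself gathers revlog files (.d data, .i index) with their sizes. A standalone sketch of that walk:

    import os, stat

    def collectrevlogs(src):
        """Yield (relative path, size) for candidate revlog files,
        skipping anything that is not a regular file."""
        seplen = len(os.path.sep)
        for dirpath, dirnames, filenames in os.walk(src):
            dirnames.sort()
            relpath = dirpath[len(src) + seplen:]
            for filename in sorted(filenames):
                if filename[-2:] not in ('.d', '.i'):
                    continue
                st = os.stat(os.path.join(dirpath, filename))
                if stat.S_ISREG(st.st_mode):
                    yield os.path.join(relpath, filename), st.st_size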
diff --git a/hgext/schemes.py b/hgext/schemes.py
index 51ab3ed..5ea285f 100644
--- a/hgext/schemes.py
+++ b/hgext/schemes.py
@@ -44,8 +44,6 @@ import os, re
from mercurial import extensions, hg, templater, util
from mercurial.i18n import _
-testedwith = 'internal'
-
class ShortRepository(object):
def __init__(self, url, scheme, templater):
@@ -74,10 +72,9 @@ class ShortRepository(object):
return hg._peerlookup(url).instance(ui, url, create)
def hasdriveletter(orig, path):
- if path:
- for scheme in schemes:
- if path.startswith(scheme + ':'):
- return False
+ for scheme in schemes:
+ if path.startswith(scheme + ':'):
+ return False
return orig(path)
schemes = {
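Note: hasdriveletter() above is wrapped so that a configured scheme such as 'py:' is not mistaken for a one-letter Windows drive; the reverted version also loses the guard for an empty path. A sketch of the intended check (SCHEMES here is a stand-in for the extension's scheme table):

    SCHEMES = {'py': 'http://hg.python.org/'}

    def hasdriveletter(path):
        """False for 'py:whatever' (a scheme), True for 'c:\\repo'."""
        if not path:
            return False
        for scheme in SCHEMES:
            if path.startswith(scheme + ':'):
                return False
        return len(path) >= 2 and path[0].isalpha() and path[1] == ':'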
diff --git a/hgext/share.py b/hgext/share.py
index fb11921..cc33148 100644
--- a/hgext/share.py
+++ b/hgext/share.py
@@ -6,9 +6,7 @@
'''share a common history between several working directories'''
from mercurial.i18n import _
-from mercurial import hg, commands, util
-
-testedwith = 'internal'
+from mercurial import hg, commands
def share(ui, source, dest=None, noupdate=False):
"""create a new shared repository
@@ -30,46 +28,11 @@ def share(ui, source, dest=None, noupdate=False):
return hg.share(ui, source, dest, not noupdate)
-def unshare(ui, repo):
- """convert a shared repository to a normal one
-
- Copy the store data to the repo and remove the sharedpath data.
- """
-
- if repo.sharedpath == repo.path:
- raise util.Abort(_("this is not a shared repo"))
-
- destlock = lock = None
- lock = repo.lock()
- try:
- # we use locks here because if we race with commit, we
- # can end up with extra data in the cloned revlogs that's
- # not pointed to by changesets, thus causing verify to
- # fail
-
- destlock = hg.copystore(ui, repo, repo.path)
-
- sharefile = repo.join('sharedpath')
- util.rename(sharefile, sharefile + '.old')
-
- repo.requirements.discard('sharedpath')
- repo._writerequirements()
- finally:
- destlock and destlock.release()
- lock and lock.release()
-
- # update store, spath, sopener and sjoin of repo
- repo.__init__(ui, repo.root)
-
cmdtable = {
"share":
(share,
[('U', 'noupdate', None, _('do not create a working copy'))],
_('[-U] SOURCE [DEST]')),
- "unshare":
- (unshare,
- [],
- ''),
}
commands.norepo += " share"
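Note: the removed unshare command above reduces to two steps once Mercurial's locking and requirements bookkeeping are set aside. A sketch under that simplification (copystore is assumed to materialise the shared store under repopath/.hg):

    import os

    def unshare(repopath, copystore):
        """Copy the store locally, then retire the 'sharedpath'
        pointer so the repository no longer depends on its source."""
        copystore(repopath)
        sharefile = os.path.join(repopath, '.hg', 'sharedpath')
        os.rename(sharefile, sharefile + '.old')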
diff --git a/hgext/transplant.py b/hgext/transplant.py
index a506c0c..90f99eb 100644
--- a/hgext/transplant.py
+++ b/hgext/transplant.py
@@ -20,12 +20,8 @@ from mercurial import bundlerepo, hg, merge, match
from mercurial import patch, revlog, scmutil, util, error, cmdutil
from mercurial import revset, templatekw
-class TransplantError(error.Abort):
- pass
-
cmdtable = {}
command = cmdutil.command(cmdtable)
-testedwith = 'internal'
class transplantentry(object):
def __init__(self, lnode, rnode):
@@ -85,25 +81,19 @@ class transplanter(object):
self.opener = scmutil.opener(self.path)
self.transplants = transplants(self.path, 'transplants',
opener=self.opener)
- self.editor = None
def applied(self, repo, node, parent):
'''returns True if a node is already an ancestor of parent
- or is parent or has already been transplanted'''
- if hasnode(repo, parent):
- parentrev = repo.changelog.rev(parent)
+ or has already been transplanted'''
if hasnode(repo, node):
- rev = repo.changelog.rev(node)
- reachable = repo.changelog.incancestors([parentrev], rev)
- if rev in reachable:
+ if node in repo.changelog.reachable(parent, stop=node):
return True
for t in self.transplants.get(node):
# it might have been stripped
if not hasnode(repo, t.lnode):
self.transplants.remove(t)
return False
- lnoderev = repo.changelog.rev(t.lnode)
- if lnoderev in repo.changelog.incancestors([parentrev], lnoderev):
+ if t.lnode in repo.changelog.reachable(parent, stop=t.lnode):
return True
return False
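Note: both versions of applied() above ask the same question through different changelog APIs: is the candidate node the working parent, or already among its ancestors? Sketched over a plain ancestors(rev) -> set function:

    def alreadyapplied(ancestors, parent, node):
        """True if transplanting `node` would be redundant because
        it already reaches the working directory parent."""
        return node == parent or node in ancestors(parent)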
@@ -115,11 +105,10 @@ class transplanter(object):
diffopts = patch.diffopts(self.ui, opts)
diffopts.git = True
- lock = wlock = tr = None
+ lock = wlock = None
try:
wlock = repo.wlock()
lock = repo.lock()
- tr = repo.transaction('transplant')
for rev in revs:
node = revmap[rev]
revstr = '%s:%s' % (rev, short(node))
@@ -130,7 +119,7 @@ class transplanter(object):
continue
parents = source.changelog.parents(node)
- if not (opts.get('filter') or opts.get('log')):
+ if not opts.get('filter'):
# If the changeset parent is the same as the
# wdir's parent, just pull it.
if parents[0] == p1:
@@ -139,7 +128,7 @@ class transplanter(object):
continue
if pulls:
if source != repo:
- repo.pull(source.peer(), heads=pulls)
+ repo.pull(source, heads=pulls)
merge.update(repo, pulls[-1], False, False, None)
p1, p2 = repo.dirstate.parents()
pulls = []
@@ -153,26 +142,14 @@ class transplanter(object):
if not hasnode(repo, node):
repo.pull(source, heads=[node])
- skipmerge = False
if parents[1] != revlog.nullid:
- if not opts.get('parent'):
- self.ui.note(_('skipping merge changeset %s:%s\n')
- % (rev, short(node)))
- skipmerge = True
- else:
- parent = source.lookup(opts['parent'])
- if parent not in parents:
- raise util.Abort(_('%s is not a parent of %s') %
- (short(parent), short(node)))
- else:
- parent = parents[0]
-
- if skipmerge:
+ self.ui.note(_('skipping merge changeset %s:%s\n')
+ % (rev, short(node)))
patchfile = None
else:
fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
fp = os.fdopen(fd, 'w')
- gen = patch.diff(source, parent, node, opts=diffopts)
+ gen = patch.diff(source, parents[0], node, opts=diffopts)
for chunk in gen:
fp.write(chunk)
fp.close()
@@ -180,17 +157,11 @@ class transplanter(object):
del revmap[rev]
if patchfile or domerge:
try:
- try:
- n = self.applyone(repo, node,
- source.changelog.read(node),
- patchfile, merge=domerge,
- log=opts.get('log'),
- filter=opts.get('filter'))
- except TransplantError:
- # Do not rollback, it is up to the user to
- # fix the merge or cancel everything
- tr.close()
- raise
+ n = self.applyone(repo, node,
+ source.changelog.read(node),
+ patchfile, merge=domerge,
+ log=opts.get('log'),
+ filter=opts.get('filter'))
if n and domerge:
self.ui.status(_('%s merged at %s\n') % (revstr,
short(n)))
@@ -201,15 +172,12 @@ class transplanter(object):
finally:
if patchfile:
os.unlink(patchfile)
- tr.close()
if pulls:
- repo.pull(source.peer(), heads=pulls)
+ repo.pull(source, heads=pulls)
merge.update(repo, pulls[-1], False, False, None)
finally:
self.saveseries(revmap, merges)
self.transplants.write()
- if tr:
- tr.release()
lock.release()
wlock.release()
@@ -263,6 +231,9 @@ class transplanter(object):
files = set()
patch.patch(self.ui, repo, patchfile, files=files, eolmode=None)
files = list(files)
+ if not files:
+ self.ui.warn(_('%s: empty changeset') % revlog.hex(node))
+ return None
except Exception, inst:
seriespath = os.path.join(self.path, 'series')
if os.path.exists(seriespath):
@@ -271,22 +242,27 @@ class transplanter(object):
p2 = node
self.log(user, date, message, p1, p2, merge=merge)
self.ui.write(str(inst) + '\n')
- raise TransplantError(_('fix up the merge and run '
- 'hg transplant --continue'))
+ raise util.Abort(_('fix up the merge and run '
+ 'hg transplant --continue'))
else:
files = None
if merge:
p1, p2 = repo.dirstate.parents()
- repo.setparents(p1, node)
+ repo.dirstate.setparents(p1, node)
m = match.always(repo.root, '')
else:
m = match.exact(repo.root, '', files)
- n = repo.commit(message, user, date, extra=extra, match=m,
- editor=self.editor)
+ n = repo.commit(message, user, date, extra=extra, match=m)
if not n:
- self.ui.warn(_('skipping emptied changeset %s\n') % short(node))
- return None
+ # Crash here to prevent an unclear crash later, in
+ # transplants.write(). This can happen if patch.patch()
+ # does nothing but claims success or if repo.status() fails
+ # to report changes done by patch.patch(). These both
+ # appear to be bugs in other parts of Mercurial, but dying
+ # here, as soon as we can detect the problem, is preferable
+ # to silently dropping changesets on the floor.
+ raise RuntimeError('nothing committed after transplant')
if not merge:
self.transplants.set(n, node)
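Note: the long comment added above argues for failing fast when a patch claims success but nothing reaches the commit. The shape of that guard, as a standalone sketch:

    def commitorfail(commit, message):
        """Run the supplied commit callable; if it reports nothing
        committed, dying at once beats silently dropping the
        changeset and crashing later in transplants.write()."""
        n = commit(message)
        if not n:
            raise RuntimeError('nothing committed after transplant')
        return n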
@@ -313,33 +289,22 @@ class transplanter(object):
def recover(self, repo):
'''commit working directory using journal metadata'''
node, user, date, message, parents = self.readlog()
- merge = False
+ merge = len(parents) == 2
if not user or not date or not message or not parents[0]:
raise util.Abort(_('transplant log file is corrupt'))
- parent = parents[0]
- if len(parents) > 1:
- if opts.get('parent'):
- parent = source.lookup(opts['parent'])
- if parent not in parents:
- raise util.Abort(_('%s is not a parent of %s') %
- (short(parent), short(node)))
- else:
- merge = True
-
extra = {'transplant_source': node}
wlock = repo.wlock()
try:
p1, p2 = repo.dirstate.parents()
- if p1 != parent:
+ if p1 != parents[0]:
raise util.Abort(
_('working dir not at transplant parent %s') %
- revlog.hex(parent))
+ revlog.hex(parents[0]))
if merge:
- repo.setparents(p1, parents[1])
- n = repo.commit(message, user, date, extra=extra,
- editor=self.editor)
+ repo.dirstate.setparents(p1, parents[1])
+ n = repo.commit(message, user, date, extra=extra)
if not n:
raise util.Abort(_('commit failed'))
if not merge:
@@ -496,9 +461,6 @@ def browserevs(ui, repo, nodes, opts):
('a', 'all', None, _('pull all changesets up to BRANCH')),
('p', 'prune', [], _('skip over REV'), _('REV')),
('m', 'merge', [], _('merge at REV'), _('REV')),
- ('', 'parent', '',
- _('parent to choose when transplanting merge'), _('REV')),
- ('e', 'edit', False, _('invoke editor on commit messages')),
('', 'log', None, _('append transplant info to log message')),
('c', 'continue', None, _('continue last transplant session '
'after repair')),
@@ -531,7 +493,7 @@ def transplant(ui, repo, *revs, **opts):
transplanted, otherwise you will be prompted to select the
changesets you want.
- :hg:`transplant --branch REV --all` will transplant the
+ :hg:`transplant --branch REVISION --all` will transplant the
selected branch (up to the named revision) onto your current
working directory.
@@ -540,9 +502,6 @@ def transplant(ui, repo, *revs, **opts):
of a merged transplant, and you can merge descendants of them
normally instead of transplanting them.
- Merge changesets may be transplanted directly by specifying the
- proper parent changeset by calling :hg:`transplant --parent`.
-
If no merges or revisions are provided, :hg:`transplant` will
start an interactive changeset browser.
@@ -590,8 +549,6 @@ def transplant(ui, repo, *revs, **opts):
opts['filter'] = ui.config('transplant', 'filter')
tp = transplanter(ui, repo)
- if opts.get('edit'):
- tp.editor = cmdutil.commitforceeditor
p1, p2 = repo.dirstate.parents()
if len(repo) > 0 and p1 == revlog.nullid:
@@ -605,9 +562,9 @@ def transplant(ui, repo, *revs, **opts):
sourcerepo = opts.get('source')
if sourcerepo:
- peer = hg.peer(ui, opts, ui.expandpath(sourcerepo))
- branches = map(peer.lookup, opts.get('branch', ()))
- source, csets, cleanupfn = bundlerepo.getremotechanges(ui, repo, peer,
+ source = hg.peer(ui, opts, ui.expandpath(sourcerepo))
+ branches = map(source.lookup, opts.get('branch', ()))
+ source, csets, cleanupfn = bundlerepo.getremotechanges(ui, repo, source,
onlyheads=branches, force=True)
else:
source = repo
@@ -657,9 +614,9 @@ def revsettransplanted(repo, subset, x):
Transplanted changesets in set, or all transplanted changesets.
"""
if x:
- s = revset.getset(repo, subset, x)
+ s = revset.getset(repo, subset, x)
else:
- s = subset
+ s = subset
return [r for r in s if repo[r].extra().get('transplant_source')]
def kwtransplanted(repo, ctx, **args):
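Note: the revset predicate just above filters on commit extras. The same filter over a plain mapping (extra stands in for repo[r].extra()):

    def transplanted(revs, extra):
        """Keep revisions whose extras record a transplant source."""
        return [r for r in revs if extra(r).get('transplant_source')]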
diff --git a/hgext/win32mbcs.py b/hgext/win32mbcs.py
index 65f0854..8cda3f0 100644
--- a/hgext/win32mbcs.py
+++ b/hgext/win32mbcs.py
@@ -48,7 +48,6 @@ It is useful for the users who want to commit with UTF-8 log message.
import os, sys
from mercurial.i18n import _
from mercurial import util, encoding
-testedwith = 'internal'
_encoding = None # see extsetup
@@ -128,14 +127,11 @@ def wrapname(name, wrapper):
# NOTE: os.path.dirname() and os.path.basename() are safe because
# they use result of os.path.split()
funcs = '''os.path.join os.path.split os.path.splitext
- os.path.normpath os.makedirs
+ os.path.splitunc os.path.normpath os.path.normcase os.makedirs
mercurial.util.endswithsep mercurial.util.splitpath mercurial.util.checkcase
mercurial.util.fspath mercurial.util.pconvert mercurial.util.normpath
mercurial.util.checkwinfilename mercurial.util.checkosfilename'''
-# List of Windows specific functions to be wrapped.
-winfuncs = '''os.path.splitunc'''
-
# codec and alias names of sjis and big5 to be faked.
problematic_encodings = '''big5 big5-tw csbig5 big5hkscs big5-hkscs
hkscs cp932 932 ms932 mskanji ms-kanji shift_jis csshiftjis shiftjis
@@ -144,8 +140,7 @@ problematic_encodings = '''big5 big5-tw csbig5 big5hkscs big5-hkscs
def extsetup(ui):
# TODO: decide use of config section for this extension
- if ((not os.path.supports_unicode_filenames) and
- (sys.platform != 'cygwin')):
+ if not os.path.supports_unicode_filenames:
ui.warn(_("[win32mbcs] cannot activate on this platform.\n"))
return
# determine encoding for filename
@@ -155,9 +150,6 @@ def extsetup(ui):
if _encoding.lower() in problematic_encodings.split():
for f in funcs.split():
wrapname(f, wrapper)
- if os.name == 'nt':
- for f in winfuncs.split():
- wrapname(f, wrapper)
wrapname("mercurial.osutil.listdir", wrapperforlistdir)
# Check sys.args manually instead of using ui.debug() because
# command line options is not yet applied when
@@ -165,3 +157,4 @@ def extsetup(ui):
if '--debug' in sys.argv:
ui.write("[win32mbcs] activated with encoding: %s\n"
% _encoding)
+
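Note: wrapname() above monkey-patches path helpers so MBCS-encoded arguments survive them. The underlying trick for one function at a time, sketched in Python 3 spelling (the real extension targets Python 2 byte strings):

    import os

    def mbcswrap(func, encoding='cp932'):
        """Decode byte arguments before calling a path function and
        re-encode its result, so trailing 0x5c bytes inside multibyte
        characters are not misread as path separators."""
        def wrapped(*args):
            uargs = [a.decode(encoding) if isinstance(a, bytes) else a
                     for a in args]
            result = func(*uargs)
            if isinstance(result, str):
                return result.encode(encoding)
            return result
        return wrapped

    normpath = mbcswrap(os.path.normpath)  # usage: wrap one helper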
diff --git a/hgext/win32text.py b/hgext/win32text.py
index a26c997..82e6aed 100644
--- a/hgext/win32text.py
+++ b/hgext/win32text.py
@@ -46,8 +46,6 @@ from mercurial.node import short
from mercurial import util
import re
-testedwith = 'internal'
-
# regexp for single LF without CR preceding.
re_single_lf = re.compile('(^|[^\r])\n', re.MULTILINE)
@@ -130,7 +128,7 @@ def forbidnewline(ui, repo, hooktype, node, newline, **kwargs):
data = c[f].data()
if not util.binary(data) and newline in data:
if not halt:
- ui.warn(_('attempt to commit or push text file(s) '
+ ui.warn(_('Attempt to commit or push text file(s) '
'using %s line endings\n') %
newlinestr[newline])
ui.warn(_('in %s: %s\n') % (short(c.node()), f))
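Note: the forbidnewline hook above relies on re_single_lf, which flags an LF only when it is not part of a CRLF pair. A self-contained check using the same pattern:

    import re

    re_single_lf = re.compile('(^|[^\r])\n', re.MULTILINE)

    def has_bare_lf(text):
        """True if any LF lacks a preceding CR; the real hook also
        screens out binary files before warning."""
        return re_single_lf.search(text) is not None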
diff --git a/hgext/zeroconf/Zeroconf.py b/hgext/zeroconf/Zeroconf.py
index e8dfa14..30bdc1a 100644
--- a/hgext/zeroconf/Zeroconf.py
+++ b/hgext/zeroconf/Zeroconf.py
@@ -17,8 +17,8 @@
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
- License along with this library; if not, see
- <http://www.gnu.org/licenses/>.
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
diff --git a/hgext/zeroconf/__init__.py b/hgext/zeroconf/__init__.py
index 52ceffa..e7d7364 100644
--- a/hgext/zeroconf/__init__.py
+++ b/hgext/zeroconf/__init__.py
@@ -32,8 +32,6 @@ from mercurial import extensions
from mercurial.hgweb import hgweb_mod
from mercurial.hgweb import hgwebdir_mod
-testedwith = 'internal'
-
# publish
server = None
@@ -46,7 +44,7 @@ def getip():
s.connect(('1.0.0.1', 0))
ip = s.getsockname()[0]
return ip
- except socket.error:
+ except:
pass
# Generic method, sometimes gives useless results
@@ -63,7 +61,7 @@ def getip():
s.connect(('1.0.0.1', 1))
ip = s.getsockname()[0]
return ip
- except socket.error:
+ except:
pass
return dumbip
@@ -121,8 +119,7 @@ class hgwebdirzc(hgwebdir_mod.hgwebdir):
name = os.path.basename(repo)
path = (prefix + repo).strip('/')
desc = u.config('web', 'description', name)
- publish(name, desc, path,
- util.getport(u.config("web", "port", 8000)))
+ publish(name, desc, path, util.getport(u.config("web", "port", 8000)))
# listen
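Note: getip() above (whose except clauses the hunks revert to bare excepts) rests on a standard trick; a condensed sketch of its second probe:

    import socket

    def getip():
        """A UDP connect() sends no packets, but it forces the kernel
        to pick the outgoing interface, whose address getsockname()
        then reveals; fall back to loopback on any socket error."""
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect(('1.0.0.1', 1))
            return s.getsockname()[0]
        except socket.error:
            return '127.0.0.1'
        finally:
            s.close()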